Compare commits


No commits in common. "main" and "main-0.9.x" have entirely different histories.

210 changed files with 12635 additions and 10062 deletions

.cargo/config.toml (new file)

@@ -0,0 +1,3 @@
+[target.x86_64-unknown-linux-gnu]
+linker = "clang"
+rustflags = ["-C", "link-arg=-fuse-ld=mold"]

@@ -5,7 +5,6 @@ when:
   - pull_request
   - deployment
   - cron
-  - manual

 steps:
   - name: check formatting
@@ -16,21 +15,34 @@ steps:
   - name: build
     image: nixpkgs/nix:nixos-22.05
     commands:
-      - nix-build -j4 --attr flakePackages.dev
+      - nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}

-  - name: unit + func tests (lmdb)
+  - name: unit + func tests
     image: nixpkgs/nix:nixos-22.05
+    environment:
+      GARAGE_TEST_INTEGRATION_EXE: result-bin/bin/garage
+      GARAGE_TEST_INTEGRATION_PATH: tmp-garage-integration
     commands:
-      - nix-build -j4 --attr flakePackages.tests-lmdb
-
-  - name: unit + func tests (sqlite)
-    image: nixpkgs/nix:nixos-22.05
-    commands:
-      - nix-build -j4 --attr flakePackages.tests-sqlite
+      - nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
+      - nix-build --no-build-output --attr test.amd64
+      - ./result/bin/garage_db-*
+      - ./result/bin/garage_api-*
+      - ./result/bin/garage_model-*
+      - ./result/bin/garage_rpc-*
+      - ./result/bin/garage_table-*
+      - ./result/bin/garage_util-*
+      - ./result/bin/garage_web-*
+      - ./result/bin/garage-*
+      - GARAGE_TEST_INTEGRATION_DB_ENGINE=sled ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
+      - nix-shell --attr ci --run "killall -9 garage" || true
+      - GARAGE_TEST_INTEGRATION_DB_ENGINE=lmdb ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
+      - nix-shell --attr ci --run "killall -9 garage" || true
+      - GARAGE_TEST_INTEGRATION_DB_ENGINE=sqlite ./result/bin/integration-* || (cat tmp-garage-integration/stderr.log; false)
+      - rm result
+      - rm -rv tmp-garage-integration

   - name: integration tests
     image: nixpkgs/nix:nixos-22.05
     commands:
-      - nix-build -j4 --attr flakePackages.dev
+      - nix-build --no-build-output --attr clippy.amd64 --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
       - nix-shell --attr ci --run ./script/test-smoke.sh || (cat /tmp/garage.log; false)
-    depends_on: [ build ]

@@ -9,20 +9,19 @@ depends_on:
 steps:
   - name: refresh-index
     image: nixpkgs/nix:nixos-22.05
-    environment:
-      AWS_ACCESS_KEY_ID:
-        from_secret: garagehq_aws_access_key_id
-      AWS_SECRET_ACCESS_KEY:
-        from_secret: garagehq_aws_secret_access_key
+    secrets:
+      - source: garagehq_aws_access_key_id
+        target: AWS_ACCESS_KEY_ID
+      - source: garagehq_aws_secret_access_key
+        target: AWS_SECRET_ACCESS_KEY
     commands:
      - mkdir -p /etc/nix && cp nix/nix.conf /etc/nix/nix.conf
      - nix-shell --attr ci --run "refresh_index"

   - name: multiarch-docker
     image: nixpkgs/nix:nixos-22.05
-    environment:
-      DOCKER_AUTH:
-        from_secret: docker_auth
+    secrets:
+      - docker_auth
     commands:
      - mkdir -p /root/.docker
      - echo $DOCKER_AUTH > /root/.docker/config.json

@@ -18,12 +18,13 @@ steps:
   - name: build
     image: nixpkgs/nix:nixos-22.05
     commands:
-      - nix-build --attr releasePackages.${ARCH} --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
+      - nix-build --no-build-output --attr pkgs.${ARCH}.release --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}

   - name: check is static binary
     image: nixpkgs/nix:nixos-22.05
     commands:
-      - nix-shell --attr ci --run "./script/not-dynamic.sh result/bin/garage"
+      - nix-build --no-build-output --attr pkgs.${ARCH}.release --argstr git_version ${CI_COMMIT_TAG:-$CI_COMMIT_SHA}
+      - nix-shell --attr ci --run "./script/not-dynamic.sh result-bin/bin/garage"

   - name: integration tests
     image: nixpkgs/nix:nixos-22.05
@@ -47,10 +48,11 @@ steps:
     image: nixpkgs/nix:nixos-22.05
     environment:
       TARGET: "${TARGET}"
-      AWS_ACCESS_KEY_ID:
-        from_secret: garagehq_aws_access_key_id
-      AWS_SECRET_ACCESS_KEY:
-        from_secret: garagehq_aws_secret_access_key
+    secrets:
+      - source: garagehq_aws_access_key_id
+        target: AWS_ACCESS_KEY_ID
+      - source: garagehq_aws_secret_access_key
+        target: AWS_SECRET_ACCESS_KEY
     commands:
      - nix-shell --attr ci --run "to_s3"
@@ -59,8 +61,8 @@ steps:
     environment:
       DOCKER_PLATFORM: "linux/${ARCH}"
       CONTAINER_NAME: "dxflrs/${ARCH}_garage"
-      DOCKER_AUTH:
-        from_secret: docker_auth
+    secrets:
+      - docker_auth
     commands:
      - mkdir -p /root/.docker
      - echo $DOCKER_AUTH > /root/.docker/config.json

Cargo.lock (generated, 1841 lines changed): diff suppressed because it is too large.

Cargo.nix (new file, 7027 lines): diff suppressed because it is too large.

@@ -8,10 +8,7 @@ members = [
 	"src/table",
 	"src/block",
 	"src/model",
-	"src/api/common",
-	"src/api/s3",
-	"src/api/k2v",
-	"src/api/admin",
+	"src/api",
 	"src/web",
 	"src/garage",
 	"src/k2v-client",
@@ -24,18 +21,15 @@ default-members = ["src/garage"]
 # Internal Garage crates
 format_table = { version = "0.1.1", path = "src/format-table" }
-garage_api_common = { version = "1.0.1", path = "src/api/common" }
-garage_api_admin = { version = "1.0.1", path = "src/api/admin" }
-garage_api_s3 = { version = "1.0.1", path = "src/api/s3" }
-garage_api_k2v = { version = "1.0.1", path = "src/api/k2v" }
-garage_block = { version = "1.0.1", path = "src/block" }
-garage_db = { version = "1.0.1", path = "src/db", default-features = false }
-garage_model = { version = "1.0.1", path = "src/model", default-features = false }
-garage_net = { version = "1.0.1", path = "src/net" }
-garage_rpc = { version = "1.0.1", path = "src/rpc" }
-garage_table = { version = "1.0.1", path = "src/table" }
-garage_util = { version = "1.0.1", path = "src/util" }
-garage_web = { version = "1.0.1", path = "src/web" }
+garage_api = { version = "0.9.4", path = "src/api" }
+garage_block = { version = "0.9.4", path = "src/block" }
+garage_db = { version = "0.9.4", path = "src/db", default-features = false }
+garage_model = { version = "0.9.4", path = "src/model", default-features = false }
+garage_net = { version = "0.9.4", path = "src/net" }
+garage_rpc = { version = "0.9.4", path = "src/rpc" }
+garage_table = { version = "0.9.4", path = "src/table" }
+garage_util = { version = "0.9.4", path = "src/util" }
+garage_web = { version = "0.9.4", path = "src/web" }
 k2v-client = { version = "0.0.4", path = "src/k2v-client" }

 # External crates from crates.io
@@ -49,9 +43,8 @@ bytes = "1.0"
 bytesize = "1.1"
 cfg-if = "1.0"
 chrono = "0.4"
-crc32fast = "1.4"
-crc32c = "0.6"
 crypto-common = "0.1"
+digest = "0.10"
 err-derive = "0.3"
 gethostname = "0.4"
 git-version = "0.3.4"
@@ -60,22 +53,19 @@ hexdump = "0.1"
 hmac = "0.12"
 idna = "0.5"
 itertools = "0.12"
-ipnet = "2.9.0"
 lazy_static = "1.4"
 md-5 = "0.10"
 mktemp = "0.5"
-nix = { version = "0.29", default-features = false, features = ["fs"] }
+nix = { version = "0.27", default-features = false, features = ["fs"] }
 nom = "7.1"
 parse_duration = "2.1"
 pin-project = "1.0.12"
 pnet_datalink = "0.34"
 rand = "0.8"
-sha1 = "0.10"
 sha2 = "0.10"
 timeago = { version = "0.4", default-features = false }
 xxhash-rust = { version = "0.8", default-features = false, features = ["xxh3"] }

-aes-gcm = { version = "0.10", features = ["aes", "stream"] }
 sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" }
 kuska-handshake = { version = "0.2.0", features = ["default", "async_std"] }
@@ -90,6 +80,7 @@ heed = { version = "0.11", default-features = false, features = ["lmdb"] }
 rusqlite = "0.31.0"
 r2d2 = "0.8"
 r2d2_sqlite = "0.24"
+sled = "0.34"

 async-compression = { version = "0.4", features = ["tokio", "zstd"] }
 zstd = { version = "0.13", default-features = false }
@@ -141,6 +132,8 @@ thiserror = "1.0"
 assert-json-diff = "2.0"
 rustc_version = "0.4.0"
 static_init = "1.0"
+
+aws-config = "1.1.4"
 aws-sdk-config = "1.13"
 aws-sdk-s3 = "1.14"

@@ -3,5 +3,5 @@ FROM scratch
 ENV RUST_BACKTRACE=1
 ENV RUST_LOG=garage=info
-COPY result/bin/garage /
+COPY result-bin/bin/garage /
 CMD [ "/garage", "server"]

@@ -1,10 +1,13 @@
-.PHONY: doc all run1 run2 run3
+.PHONY: doc all release shell run1 run2 run3

 all:
-	clear
-	cargo build \
-		--config 'target.x86_64-unknown-linux-gnu.linker="clang"' \
-		--config 'target.x86_64-unknown-linux-gnu.rustflags=["-C", "link-arg=-fuse-ld=mold"]' \
+	clear; cargo build
+
+release:
+	nix-build --attr pkgs.amd64.release --no-build-output
+
+shell:
+	nix-shell

 # ----

@@ -3,22 +3,54 @@
 with import ./nix/common.nix;

 let
-  pkgs = import nixpkgs { };
+  pkgs = import pkgsSrc { };
   compile = import ./nix/compile.nix;
-  build_release = target: (compile {
-    inherit target system git_version nixpkgs;
-    crane = flake.inputs.crane;
-    rust-overlay = flake.inputs.rust-overlay;
+
+  build_debug_and_release = (target: {
+    debug = (compile {
+      inherit system target git_version pkgsSrc cargo2nixOverlay;
+      release = false;
+    }).workspace.garage { compileMode = "build"; };
+
+    release = (compile {
+      inherit system target git_version pkgsSrc cargo2nixOverlay;
       release = true;
-  }).garage;
+    }).workspace.garage { compileMode = "build"; };
+  });
+
+  test = (rustPkgs:
+    pkgs.symlinkJoin {
+      name = "garage-tests";
+      paths =
+        builtins.map (key: rustPkgs.workspace.${key} { compileMode = "test"; })
+        (builtins.attrNames rustPkgs.workspace);
+    });
+
 in {
-  releasePackages = {
-    amd64 = build_release "x86_64-unknown-linux-musl";
-    i386 = build_release "i686-unknown-linux-musl";
-    arm64 = build_release "aarch64-unknown-linux-musl";
-    arm = build_release "armv6l-unknown-linux-musleabihf";
+  pkgs = {
+    amd64 = build_debug_and_release "x86_64-unknown-linux-musl";
+    i386 = build_debug_and_release "i686-unknown-linux-musl";
+    arm64 = build_debug_and_release "aarch64-unknown-linux-musl";
+    arm = build_debug_and_release "armv6l-unknown-linux-musleabihf";
+  };
+  test = {
+    amd64 = test (compile {
+      inherit system git_version pkgsSrc cargo2nixOverlay;
+      target = "x86_64-unknown-linux-musl";
+      features = [
+        "garage/bundled-libs"
+        "garage/k2v"
+        "garage/sled"
+        "garage/lmdb"
+        "garage/sqlite"
+      ];
+    });
+  };
+  clippy = {
+    amd64 = (compile {
+      inherit system git_version pkgsSrc cargo2nixOverlay;
+      target = "x86_64-unknown-linux-musl";
+      compiler = "clippy";
+    }).workspace.garage { compileMode = "build"; };
   };
-  flakePackages = flake.packages.${system};
 }

@@ -98,6 +98,7 @@ paths:
         type: string
       example:
         - "k2v"
+        - "sled"
         - "lmdb"
        - "sqlite"
        - "consul-discovery"

@@ -23,7 +23,7 @@ client = minio.Minio(
     "GKyourapikey",
     "abcd[...]1234",
     # Force the region, this is specific to garage
-    region="garage",
+    region="region",
 )
 ```
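
The `region` value given to the client must match the region name configured on the Garage server. As a sketch of the server-side counterpart (the field values below are the documented defaults, not taken from this diff):

```toml
# garage.toml: the S3 API section whose region the Minio client must force
[s3_api]
api_bind_addr = "[::]:3900"            # where the S3 API listens
s3_region = "garage"                   # region name clients have to use
root_domain = ".s3.garage.localhost"   # for vhost-style bucket addressing
```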

@@ -80,53 +80,6 @@ To test your new configuration, just reload your Nextcloud webpage and start sending data.
 *External link:* [Nextcloud Documentation > Primary Storage](https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/primary_storage.html)

-#### SSE-C encryption (since Garage v1.0)
-
-Since version 1.0, Garage supports server-side encryption with customer keys
-(SSE-C). In this mode, Garage is responsible for encrypting and decrypting
-objects, but it does not store the encryption key itself. The encryption key
-should be provided by Nextcloud upon each request. This mode of operation is
-supported by Nextcloud and it has successfully been tested together with
-Garage.
-
-To enable SSE-C encryption:
-
-1. Make sure your Garage server is accessible via SSL through a reverse proxy
-   such as Nginx, and that it is using a valid public certificate (Nextcloud
-   might be able to connect to an S3 server that is using a self-signed
-   certificate, but you will lose many hours while trying, so don't).
-   Configure values for `use_ssl` and `port` accordingly in your `config.php`
-   file.
-
-2. Generate an encryption key using the following command:
-
-   ```
-   openssl rand -base64 32
-   ```
-
-   Make sure to keep this key **secret**!
-
-3. Add the encryption key in your `config.php` file as follows:
-
-   ```php
-   <?php
-   $CONFIG = array(
-   'objectstore' => [
-       'class' => '\\OC\\Files\\ObjectStore\\S3',
-       'arguments' => [
-           ...
-           'sse_c_key' => 'exampleencryptionkeyLbU+5fKYQcVoqnn+RaIOXgo=',
-           ...
-       ],
-   ],
-   ```
-
-Nextcloud will now make Garage encrypt files at rest in the storage bucket.
-These files will not be readable by an S3 client that has credentials to the
-bucket but doesn't also know the secret encryption key.
-
 ### External Storage

 **From the GUI.** Activate the "External storage support" app from the "Applications" page (click on your account icon on the top right corner of your screen to display the menu). Go to your parameters page (also located below your account icon). Click on external storage (or the corresponding translation in your language).
@@ -292,7 +245,7 @@ with average object size ranging from 50 KB to 150 KB.
 As such, your Garage cluster should be configured appropriately for good performance:

 - use Garage v0.8.0 or higher with the [LMDB database engine](@documentation/reference-manual/configuration.md#db-engine-since-v0-8-0).
-  Older versions of Garage used the Sled database engine which had issues, such as databases quickly ending up taking tens of GB of disk space.
+  With the default Sled database engine, your database could quickly end up taking tens of GB of disk space.
 - the Garage database should be stored on a SSD

 ### Creating your bucket
@@ -335,7 +288,6 @@ From the [official Mastodon documentation](https://docs.joinmastodon.org/admin/t
 ```bash
 $ RAILS_ENV=production bin/tootctl media remove --days 3
-$ RAILS_ENV=production bin/tootctl media remove --days 15 --prune-profiles
 $ RAILS_ENV=production bin/tootctl media remove-orphans
 $ RAILS_ENV=production bin/tootctl preview_cards remove --days 15
 ```
@@ -354,6 +306,8 @@ Imports: 1.7 KB
 Settings: 0 Bytes
 ```

+Unfortunately, [old avatars and headers cannot currently be cleaned up](https://github.com/mastodon/mastodon/issues/9567).
+
 ### Migrating your data

 Data migration should be done with an efficient S3 client.

@@ -17,7 +17,7 @@ Garage can also help you serve this content.
 ## Gitea

-You can use Garage with Gitea to store your [git LFS](https://git-lfs.github.com/) data, your users' avatar, and their attachments.
+You can use Garage with Gitea to store your [git LFS](https://git-lfs.github.com/) data, your users' avatar, and their attachements.

 You can configure a different target for each data type (check `[lfs]` and `[attachment]` sections of the Gitea documentation) and you can provide a default one through the `[storage]` section.

 Let's start by creating a key and a bucket (your key id and secret will be needed later, keep them somewhere):

@@ -53,43 +53,20 @@ and that's also why your nodes have super long identifiers.
 Adding TLS support built into Garage is not currently planned.

-## Garage stores data in plain text on the filesystem or encrypted using customer keys (SSE-C)
+## Garage stores data in plain text on the filesystem

-For standard S3 API requests, Garage does not encrypt data at rest by itself.
-For the most generic at rest encryption of data, we recommend setting up your
-storage partitions on encrypted LUKS devices.
-
-If you are developping your own client software that makes use of S3 storage,
-we recommend implementing data encryption directly on the client side and never
-transmitting plaintext data to Garage. This makes it easy to use an external
-untrusted storage provider if necessary.
-
-Garage does support [SSE-C
-encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html),
-an encryption mode of Amazon S3 where data is encrypted at rest using
-encryption keys given by the client. The encryption keys are passed to the
-server in a header in each request, to encrypt or decrypt data at the moment of
-reading or writing. The server discards the key as soon as it has finished
-using it for the request. This mode allows the data to be encrypted at rest by
-Garage itself, but it requires support in the client software. It is also not
-adapted to a model where the server is not trusted or assumed to be
-compromised, as the server can easily know the encryption keys. Note however
-that when using SSE-C encryption, the only Garage node that knows the
-encryption key passed in a given request is the node to which the request is
-directed (which can be a gateway node), so it is easy to have untrusted nodes
-in the cluster as long as S3 API requests containing SSE-C encryption keys are
-not directed to them.
-
-Implementing automatic data encryption directly in Garage without client-side
-management of keys (something like
-[SSE-S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingServerSideEncryption.html))
-could make things simpler for end users that don't want to setup LUKS, but also
-raises many more questions, especially around key management: for encryption of
-data, where could Garage get the encryption keys from? If we encrypt data but
-keep the keys in a plaintext file next to them, it's useless. We probably don't
-want to have to manage secrets in Garage as it would be very hard to do in a
-secure way. At the time of speaking, there are no plans to implement this in
-Garage.
+Garage does not handle data encryption at rest by itself, and instead delegates
+to the user to add encryption, either at the storage layer (LUKS, etc) or on
+the client side (or both). There are no current plans to add data encryption
+directly in Garage.
+
+Implementing data encryption directly in Garage might make things simpler for
+end users, but also raises many more questions, especially around key
+management: for encryption of data, where could Garage get the encryption keys
+from ? If we encrypt data but keep the keys in a plaintext file next to them,
+it's useless. We probably don't want to have to manage secrets in garage as it
+would be very hard to do in a secure way. Maybe integrate with an external
+system such as Hashicorp Vault?

 # Adding data encryption using external tools

@@ -91,5 +91,6 @@ The following feature flags are available in v0.8.0:
 | `metrics` | *by default* | Enable collection of metrics in Prometheus format on the admin API |
 | `telemetry-otlp` | optional | Enable collection of execution traces using OpenTelemetry |
 | `syslog` | optional | Enable logging to Syslog |
+| `sled` | *by default* | Enable using Sled to store Garage's metadata |
 | `lmdb` | *by default* | Enable using LMDB to store Garage's metadata |
 | `sqlite` | *by default* | Enable using Sqlite3 to store Garage's metadata |

@@ -90,20 +90,19 @@ to store 2 TB of data in total.
 - If you only have an HDD and no SSD, it's fine to put your metadata alongside
   the data on the same drive, but then consider your filesystem choice wisely
   (see above). Having lots of RAM for your kernel to cache the metadata will
-  help a lot with performance. The default LMDB database engine is the most
-  tested and has good performance.
+  help a lot with performance.

 ## Get a Docker image

 Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
-We encourage you to use a fixed tag (eg. `v1.0.1`) and not the `latest` tag.
-For this example, we will use the latest published version at the time of the writing which is `v1.0.1` but it's up to you
+We encourage you to use a fixed tag (eg. `v0.9.4`) and not the `latest` tag.
+For this example, we will use the latest published version at the time of the writing which is `v0.9.4` but it's up to you
 to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).

 For example:

 ```
-sudo docker pull dxflrs/garage:v1.0.1
+sudo docker pull dxflrs/garage:v0.9.4
 ```

 ## Deploying and configuring Garage
@@ -128,7 +127,7 @@ data_dir = "/var/lib/garage/data"
 db_engine = "lmdb"
 metadata_auto_snapshot_interval = "6h"

-replication_factor = 3
+replication_mode = "3"

 compression_level = 2
@@ -152,8 +151,6 @@ Check the following for your configuration files:
 - Make sure `rpc_public_addr` contains the public IP address of the node you are configuring.
   This parameter is optional but recommended: if your nodes have trouble communicating with
   one another, consider adding it.
-  Alternatively, you can also set `rpc_public_addr_subnet`, which can filter
-  the addresses announced to other peers to a specific subnet.

 - Make sure `rpc_secret` is the same value on all nodes. It should be a 32-bytes hex-encoded secret key.
   You can generate such a key with `openssl rand -hex 32`.
@@ -171,7 +168,7 @@ docker run \
   -v /etc/garage.toml:/etc/garage.toml \
   -v /var/lib/garage/meta:/var/lib/garage/meta \
   -v /var/lib/garage/data:/var/lib/garage/data \
-  dxflrs/garage:v1.0.1
+  dxflrs/garage:v0.9.4
 ```

 With this command line, Garage should be started automatically at each boot.
@@ -185,7 +182,7 @@ If you want to use `docker-compose`, you may use the following `docker-compose.y
 version: "3"
 services:
   garage:
-    image: dxflrs/garage:v1.0.1
+    image: dxflrs/garage:v0.9.4
     network_mode: "host"
     restart: unless-stopped
     volumes:
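
Note the rename this section records: `main` (Garage v1.x) configures replication with an integer `replication_factor`, while `main-0.9.x` uses the string-valued `replication_mode`. A minimal sketch of the same three-copy setting under both spellings, as implied by the hunk above:

```toml
# Garage v1.x (main branch):
replication_factor = 3

# Garage v0.9.x (main-0.9.x branch), equivalent spelling:
# replication_mode = "3"
```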

@@ -50,20 +50,3 @@ locations. They use Garage themselves for the following tasks:
 The Deuxfleurs Garage cluster is a multi-site cluster currently composed of
 9 nodes in 3 physical locations.
-
-### Triplebit
-
-[Triplebit](https://www.triplebit.org) is a non-profit hosting provider and
-ISP focused on improving access to privacy-related services. They use
-Garage themselves for the following tasks:
-
-- Hosting of their homepage, [privacyguides.org](https://www.privacyguides.org/), and various other static sites
-- As a Mastodon object storage backend for [mstdn.party](https://mstdn.party/) and [mstdn.plus](https://mstdn.plus/)
-- As a PeerTube storage backend for [neat.tube](https://neat.tube/)
-- As a [Matrix media backend](https://github.com/matrix-org/synapse-s3-storage-provider)
-
-Triplebit's Garage cluster is a multi-site cluster currently composed of
-10 nodes in 3 physical locations.

@@ -97,7 +97,7 @@ delete a tombstone, the following condition has to be met:
   superseeded by the tombstone. This ensures that deleting the tombstone is
   safe and that no deleted value will come back in the system.

-Garage uses atomic database operations (such as compare-and-swap and
+Garage makes use of Sled's atomic operations (such as compare-and-swap and
 transactions) to ensure that only tombstones that have been correctly
 propagated to other nodes are ever deleted from the local entry tree.

@@ -67,7 +67,7 @@ Pithos has been abandonned and should probably not used yet, in the following we
 Pithos was relying as a S3 proxy in front of Cassandra (and was working with Scylla DB too).
 From its designers' mouth, storing data in Cassandra has shown its limitations justifying the project abandonment.
 They built a closed-source version 2 that does not store blobs in the database (only metadata) but did not communicate further on it.
-We considered their v2's design but concluded that it does not fit both our *Self-contained & lightweight* and *Simple* properties. It makes the development, the deployment and the operations more complicated while reducing the flexibility.
+We considered there v2's design but concluded that it does not fit both our *Self-contained & lightweight* and *Simple* properties. It makes the development, the deployment and the operations more complicated while reducing the flexibility.

 **[Riak CS](https://docs.riak.com/riak/cs/2.1.1/index.html):**
 *Not written yet*

@@ -36,7 +36,7 @@ sudo killall nix-daemon
 Now you can enter our nix-shell, all the required packages will be downloaded but they will not pollute your environment outside of the shell:

 ```bash
-nix-shell -A devShell
+nix-shell
 ```

 You can use the traditional Rust development workflow:
@@ -65,8 +65,8 @@ nix-build -j $(nproc) --max-jobs auto
 ```

 Our build has multiple parameters you might want to set:

-- `release` to build with release optimisations instead of debug
-- `target` allows for cross compilation
+- `release` build with release optimisations instead of debug
+- `target allows` for cross compilation
 - `compileMode` can be set to test or bench to build a unit test runner
 - `git_version` to inject the hash to display when running `garage stats`

@@ -141,7 +141,4 @@ blocks may still be held by Garage. If you suspect that such corruption has occurred
 in your cluster, you can run one of the following repair procedures:

 - `garage repair versions`: checks that all versions belong to a non-deleted object, and purges any orphan version
-- `garage repair block-refs`: checks that all block references belong to a non-deleted object version, and purges any orphan block reference (this will then allow the blocks to be garbage-collected)
-- `garage repair block-rc`: checks that the reference counters for blocks are in sync with the actual number of non-deleted entries in the block reference table
+- `garage repair block_refs`: checks that all block references belong to a non-deleted object version, and purges any orphan block reference (this will then allow the blocks to be garbage-collected)

@@ -12,7 +12,7 @@ An introduction to building cluster layouts can be found in the [production depl
 In Garage, all of the data that can be stored in a given cluster is divided
 into slices which we call *partitions*. Each partition is stored by
 one or several nodes in the cluster
-(see [`replication_factor`](@/documentation/reference-manual/configuration.md#replication_factor)).
+(see [`replication_mode`](@/documentation/reference-manual/configuration.md#replication_mode)).
 The layout determines the correspondence between these partitions,
 which exist on a logical level, and actual storage nodes.

@@ -21,14 +21,14 @@ data_dir = [
 ```

 Garage will automatically balance all blocks stored by the node
-among the different specified directories, proportionally to the
+among the different specified directories, proportionnally to the
 specified capacities.

 ## Updating the list of storage locations

 If you add new storage locations to your `data_dir`,
 Garage will not rebalance existing data between storage locations.
-Newly written blocks will be balanced proportionally to the specified capacities,
+Newly written blocks will be balanced proportionnally to the specified capacities,
 and existing data may be moved between drives to improve balancing,
 but only opportunistically when a data block is re-written (e.g. an object
 is re-uploaded, or an object with a duplicate block is uploaded).
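
For reference, the `data_dir = [` array named in this hunk's header is what spreads blocks across several drives in proportion to their capacities. A sketch with illustrative paths and capacities:

```toml
# garage.toml: multi-HDD layout (paths and sizes are examples)
data_dir = [
    { path = "/mnt/hdd1/garage/data", capacity = "2T" },
    { path = "/mnt/hdd2/garage/data", capacity = "4T" },  # receives ~twice the blocks
]
```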

@@ -5,7 +5,7 @@ weight = 40
 Garage is meant to work on old, second-hand hardware.
 In particular, this makes it likely that some of your drives will fail, and some manual intervention will be needed.

-Fear not! Garage is fully equipped to handle drive failures, in most common cases.
+Fear not! For Garage is fully equipped to handle drive failures, in most common cases.

 ## A note on availability of Garage

@@ -42,13 +42,6 @@ If a binary of the last version is not available for your architecture,
 or if you want a build customized for your system,
 you can [build Garage from source](@/documentation/cookbook/from-source.md).

-If none of these option work for you, you can also run Garage in a Docker
-container. When using Docker, the commands used in this guide will not work
-anymore. We recommend reading the tutorial on [configuring a
-multi-node cluster](@/documentation/cookbook/real-world.md) to learn about
-using Garage as a Docker container. For simplicity, a minimal command to launch
-Garage using Docker is provided in this quick start guide as well.
-
 ## Configuring and starting Garage
@@ -66,7 +59,7 @@ metadata_dir = "/tmp/meta"
 data_dir = "/tmp/data"
 db_engine = "sqlite"

-replication_factor = 1
+replication_mode = "none"

 rpc_bind_addr = "[::]:3901"
 rpc_public_addr = "127.0.0.1:3901"
@@ -92,9 +85,6 @@ metrics_token = "$(openssl rand -base64 32)"
 EOF
 ```

-See the [Configuration file format](https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/)
-for complete options and values.
-
 Now that your configuration file has been created, you may save it to the directory of your choice.
 By default, Garage looks for **`/etc/garage.toml`.**
 You can also store it somewhere else, but you will have to specify `-c path/to/garage.toml`
@@ -121,26 +111,6 @@ garage -c path/to/garage.toml server
 If you have placed the `garage.toml` file in `/etc` (its default location), you can simply run `garage server`.

-Alternatively, if you cannot or do not wish to run the Garage binary directly,
-you may use Docker to run Garage in a container using the following command:
-
-```bash
-docker run \
-  -d \
-  --name garaged \
-  -p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903 \
-  -v /etc/garage.toml:/path/to/garage.toml \
-  -v /var/lib/garage/meta:/path/to/garage/meta \
-  -v /var/lib/garage/data:/path/to/garage/data \
-  dxflrs/garage:v0.9.4
-```
-
-Under Linux, you can substitute `--network host` for `-p 3900:3900 -p 3901:3901 -p 3902:3902 -p 3903:3903`
-
-#### Troubleshooting
-
-Ensure your configuration file, `metadata_dir` and `data_dir` are readable by the user running the `garage` server or Docker.
-
 You can tune Garage's verbosity by setting the `RUST_LOG=` environment variable. \
 Available log levels are (from less verbose to more verbose): `error`, `warn`, `info` *(default)*, `debug` and `trace`.
@@ -161,9 +131,6 @@ It uses values from the TOML configuration file to find the Garage daemon running on the
 local node, therefore if your configuration file is not at `/etc/garage.toml` you will
 again have to specify `-c path/to/garage.toml` at each invocation.

-If you are running Garage in a Docker container, you can set `alias garage="docker exec -ti <container name> /garage"`
-to use the Garage binary inside your container.
-
 If the `garage` CLI is able to correctly detect the parameters of your local Garage node,
 the following command should be enough to show the status of your cluster:
@@ -199,7 +166,7 @@ For instance here you could write just `garage layout assign -z dc1 -c 1G 563e`.
 The layout then has to be applied to the cluster, using:

 ```bash
-garage layout apply --version 1
+garage layout apply
 ```
@@ -349,7 +316,7 @@ Check [our s3 compatibility list](@/documentation/reference-manual/s3-compatibil
 ### Other tools for interacting with Garage

-The following tools can also be used to send and receive files from/to Garage:
+The following tools can also be used to send and recieve files from/to Garage:

 - [minio-client](@/documentation/connect/cli.md#minio-client)
 - [s3cmd](@/documentation/connect/cli.md#s3cmd)
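
The single-node quick-start difference between the two branches boils down to one line of `garage.toml`; a sketch of both spellings, taken from the hunk above:

```toml
# Garage v1.x (main) quick start, no redundancy:
replication_factor = 1

# Garage v0.9.x (main-0.9.x) equivalent:
# replication_mode = "none"
```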

@@ -8,16 +8,13 @@ weight = 20
 Here is an example `garage.toml` configuration file that illustrates all of the possible options:

 ```toml
-replication_factor = 3
-consistency_mode = "consistent"
+replication_mode = "3"

 metadata_dir = "/var/lib/garage/meta"
 data_dir = "/var/lib/garage/data"
-metadata_snapshots_dir = "/var/lib/garage/snapshots"
 metadata_fsync = true
 data_fsync = false
 disable_scrub = false
-use_local_tz = false
 metadata_auto_snapshot_interval = "6h"

 db_engine = "lmdb"
@@ -25,6 +22,8 @@ db_engine = "lmdb"
 block_size = "1M"
 block_ram_buffer_max = "256MiB"

+sled_cache_capacity = "128MiB"
+sled_flush_every_ms = 2000
 lmdb_map_size = "1T"

 compression_level = 1
@@ -33,9 +32,6 @@ rpc_secret = "4425f5c26c5e11581d3223904324dcb5b5d5dfb14e5e7f35e38c595424f5f1e6"
 rpc_bind_addr = "[::]:3901"
 rpc_bind_outgoing = false
 rpc_public_addr = "[fc00:1::1]:3901"
-# or set rpc_public_adr_subnet to filter down autodiscovery to a subnet:
-# rpc_public_addr_subnet = "2001:0db8:f00:b00:/64"

 allow_world_readable_secrets = false
@@ -101,19 +97,17 @@ Top-level configuration options:
 [`data_fsync`](#data_fsync),
 [`db_engine`](#db_engine),
 [`disable_scrub`](#disable_scrub),
-[`use_local_tz`](#use_local_tz),
 [`lmdb_map_size`](#lmdb_map_size),
 [`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval),
 [`metadata_dir`](#metadata_dir),
 [`metadata_fsync`](#metadata_fsync),
-[`metadata_snapshots_dir`](#metadata_snapshots_dir),
-[`replication_factor`](#replication_factor),
-[`consistency_mode`](#consistency_mode),
+[`replication_mode`](#replication_mode),
 [`rpc_bind_addr`](#rpc_bind_addr),
 [`rpc_bind_outgoing`](#rpc_bind_outgoing),
 [`rpc_public_addr`](#rpc_public_addr),
-[`rpc_public_addr_subnet`](#rpc_public_addr_subnet)
-[`rpc_secret`/`rpc_secret_file`](#rpc_secret).
+[`rpc_secret`/`rpc_secret_file`](#rpc_secret),
+[`sled_cache_capacity`](#sled_cache_capacity),
+[`sled_flush_every_ms`](#sled_flush_every_ms).

 The `[consul_discovery]` section:
 [`api`](#consul_api),
@@ -167,12 +161,11 @@ values in the configuration file:
 ### Top-level configuration options

-#### `replication_factor` {#replication_factor}
+#### `replication_mode` {#replication_mode}

-The replication factor can be any positive integer smaller or equal the node count in your cluster.
-The chosen replication factor has a big impact on the cluster's failure tolerancy and performance characteristics.
+Garage supports the following replication modes:

-- `1`: data stored on Garage is stored on a single node. There is no
+- `none` or `1`: data stored on Garage is stored on a single node. There is no
   redundancy, and data will be unavailable as soon as one node fails or its
   network is disconnected. Do not use this for anything else than test
   deployments.
@@ -183,6 +176,17 @@ The chosen replication factor has a big impact on the cluster's failure tolerancy and performance characteristics.
   before losing data. Data remains available in read-only mode when one node is
   down, but write operations will fail.

+- `2-dangerous`: a variant of mode `2`, where written objects are written to
+  the second replica asynchronously. This means that Garage will return `200
+  OK` to a PutObject request before the second copy is fully written (or even
+  before it even starts being written). This means that data can more easily
+  be lost if the node crashes before a second copy can be completed. This
+  also means that written objects might not be visible immediately in read
+  operations. In other words, this mode severely breaks the consistency and
+  durability guarantees of standard Garage cluster operation. Benefits of
+  this mode: you can still write to your cluster when one node is
+  unavailable.
+
 - `3`: data stored on Garage will be stored on three different nodes, if
   possible each in a different zones. Garage tolerates two node failure, or
   several node failures but in no more than two zones (in a deployment with at
@@ -190,84 +194,55 @@ The chosen replication factor has a big impact on the cluster's failure tolerancy and performance characteristics.
   or node failures are only in a single zone, reading and writing data to
   Garage can continue normally.

-- `5`, `7`, ...: When setting the replication factor above 3, it is most useful to
-  choose an uneven value, since for every two copies added, one more node can fail
-  before losing the ability to write and read to the cluster.
-
-Note that in modes `2` and `3`,
-if at least the same number of zones are available, an arbitrary number of failures in
-any given zone is tolerated as copies of data will be spread over several zones.
-
-**Make sure `replication_factor` is the same in the configuration files of all nodes.
-Never run a Garage cluster where that is not the case.**
-
-It is technically possible to change the replication factor although it's a
-dangerous operation that is not officially supported. This requires you to
-delete the existing cluster layout and create a new layout from scratch,
-meaning that a full rebalancing of your cluster's data will be needed. To do
-it, shut down your cluster entirely, delete the `custer_layout` files in the
-meta directories of all your nodes, update all your configuration files with
-the new `replication_factor` parameter, restart your cluster, and then create a
-new layout with all the nodes you want to keep. Rebalancing data will take
-some time, and data might temporarily appear unavailable to your users.
-It is recommended to shut down public access to the cluster while rebalancing
-is in progress. In theory, no data should be lost as rebalancing is a
-routine operation for Garage, although we cannot guarantee you that everything
-will go right in such an extreme scenario.
-
-#### `consistency_mode` {#consistency_mode}
-
-The consistency mode setting determines the read and write behaviour of your cluster.
-
-- `consistent`: The default setting. This is what the paragraph above describes.
-  The read and write quorum will be determined so that read-after-write consistency
-  is guaranteed.
-
-- `degraded`: Lowers the read
+- `3-degraded`: a variant of replication mode `3`, that lowers the read
   quorum to `1`, to allow you to read data from your cluster when several
   nodes (or nodes in several zones) are unavailable. In this mode, Garage
-  does not provide read-after-write consistency anymore.
-  The write quorum stays the same as in the `consistent` mode, ensuring that
-  data successfully written to Garage is stored on multiple nodes (depending
-  the replication factor).
+  does not provide read-after-write consistency anymore. The write quorum is
+  still 2, ensuring that data successfully written to Garage is stored on at
+  least two nodes.

-- `dangerous`: This mode lowers both the read
+- `3-dangerous`: a variant of replication mode `3` that lowers both the read
   and write quorums to `1`, to allow you to both read and write to your
   cluster when several nodes (or nodes in several zones) are unavailable. It
   is the least consistent mode of operation proposed by Garage, and also one
   that should probably never be used.

-Changing the `consistency_mode` between modes while leaving the `replication_factor` untouched
-(e.g. setting your node's `consistency_mode` to `degraded` when it was previously unset, or from
-`dangerous` to `consistent`), can be done easily by just changing the `consistency_mode`
-parameter in your config files and restarting all your Garage nodes.
-
-The consistency mode can be used together with various replication factors, to achieve
-a wide range of read and write characteristics. Some examples:
+Note that in modes `2` and `3`,
+if at least the same number of zones are available, an arbitrary number of failures in
+any given zone is tolerated as copies of data will be spread over several zones.

-- Replication factor `2`, consistency mode `degraded`: While this mode
-  technically exists, its properties are the same as with consistency mode `consistent`,
-  since the read quorum with replication factor `2`, consistency mode `consistent` is already 1.
-
-- Replication factor `2`, consistency mode `dangerous`: written objects are written to
-  the second replica asynchronously. This means that Garage will return `200
-  OK` to a PutObject request before the second copy is fully written (or even
-  before it even starts being written). This means that data can more easily
-  be lost if the node crashes before a second copy can be completed. This
-  also means that written objects might not be visible immediately in read
-  operations. In other words, this configuration severely breaks the consistency and
-  durability guarantees of standard Garage cluster operation. Benefits of
-  this configuration: you can still write to your cluster when one node is
-  unavailable.
+**Make sure `replication_mode` is the same in the configuration files of all nodes.
+Never run a Garage cluster where that is not the case.**

 The quorums associated with each replication mode are described below:

-| `consistency_mode` | `replication_factor` | Write quorum | Read quorum | Read-after-write consistency? |
-| ------------------ | -------------------- | ------------ | ----------- | ----------------------------- |
-| `consistent`       | 1                    | 1            | 1           | yes                           |
-| `consistent`       | 2                    | 2            | 1           | yes                           |
-| `dangerous`        | 2                    | 1            | 1           | NO                            |
-| `consistent`       | 3                    | 2            | 2           | yes                           |
-| `degraded`         | 3                    | 2            | 1           | NO                            |
-| `dangerous`        | 3                    | 1            | 1           | NO                            |
+| `replication_mode` | Number of replicas | Write quorum | Read quorum | Read-after-write consistency? |
+| ------------------ | ------------------ | ------------ | ----------- | ----------------------------- |
+| `none` or `1`      | 1                  | 1            | 1           | yes                           |
+| `2`                | 2                  | 2            | 1           | yes                           |
+| `2-dangerous`      | 2                  | 1            | 1           | NO                            |
+| `3`                | 3                  | 2            | 2           | yes                           |
+| `3-degraded`       | 3                  | 2            | 1           | NO                            |
+| `3-dangerous`      | 3                  | 1            | 1           | NO                            |
+
+Changing the `replication_mode` between modes with the same number of replicas
+(e.g. from `3` to `3-degraded`, or from `2-dangerous` to `2`), can be done easily by
+just changing the `replication_mode` parameter in your config files and restarting all your
+Garage nodes.
+
+It is also technically possible to change the replication mode to a mode with a
+different numbers of replicas, although it's a dangerous operation that is not
+officially supported. This requires you to delete the existing cluster layout
+and create a new layout from scratch, meaning that a full rebalancing of your
+cluster's data will be needed. To do it, shut down your cluster entirely,
+delete the `custer_layout` files in the meta directories of all your nodes,
+update all your configuration files with the new `replication_mode` parameter,
+restart your cluster, and then create a new layout with all the nodes you want
+to keep. Rebalancing data will take some time, and data might temporarily
+appear unavailable to your users. It is recommended to shut down public access
+to the cluster while rebalancing is in progress. In theory, no data should be
+lost as rebalancing is a routine operation for Garage, although we cannot
+guarantee you that everything will go right in such an extreme scenario.

 #### `metadata_dir` {#metadata_dir}
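
Read as a migration note: what `main-0.9.x` expresses with a single `replication_mode` string is split on `main` into `replication_factor` plus `consistency_mode`. A sketch of the correspondence implied by the two quorum tables above:

```toml
# main-0.9.x: replication_mode = "3"           -> main: factor 3, mode "consistent"
# main-0.9.x: replication_mode = "3-degraded"  -> main: factor 3, mode "degraded"
# main-0.9.x: replication_mode = "3-dangerous" -> main: factor 3, mode "dangerous"
replication_factor = 3
consistency_mode = "degraded"
```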
@@ -277,7 +252,6 @@ as the index of all objects, object version and object blocks.
 Store this folder on a fast SSD drive if possible to maximize Garage's performance.

-
 #### `data_dir` {#data_dir}

 The directory in which Garage will store the data blocks of objects.
@@ -298,43 +272,29 @@ data_dir = [
 See [the dedicated documentation page](@/documentation/operations/multi-hdd.md)
 on how to operate Garage in such a setup.

-#### `metadata_snapshots_dir` (since Garage `v1.0.2`) {#metadata_snapshots_dir}
-
-The directory in which Garage will store metadata snapshots when it
-performs a snapshot of the metadata database, either when instructed to do
-so from a RPC call or regularly through
-[`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval).
-
-By default, Garage will store snapshots into a `snapshots/` subdirectory
-of [`metadata_dir`](#metadata_dir). This might quickly fill up your
-metadata storage space if you use snapshots, because Garage will need up
-to 4x the space of the existing metadata database: each snapshot requires
-roughly as much space as the original database, and Garage temporarily
-needs to store up to three different snapshots before it cleans up the oldest
-snapshot to go back to two stored snapshots.
-
-To prevent filling your disk, you might to change this setting to a
-directory with ample available space, e.g. on the same storage space as
-[`data_dir`](#data_dir).
-
 #### `db_engine` (since `v0.8.0`) {#db_engine}

 Since `v0.8.0`, Garage can use alternative storage backends as follows:

 | DB engine | `db_engine` value | Database path |
 | --------- | ----------------- | ------------- |
-| [LMDB](https://www.symas.com/lmdb) (since `v0.8.0`, default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` |
-| [Sqlite](https://sqlite.org) (since `v0.8.0`) | `"sqlite"` | `<metadata_dir>/db.sqlite` |
-| [Sled](https://sled.rs) (old default, removed since `v1.0`) | `"sled"` | `<metadata_dir>/db/` |
+| [LMDB](https://www.lmdb.tech) (default since `v0.9.0`) | `"lmdb"` | `<metadata_dir>/db.lmdb/` |
+| [Sled](https://sled.rs) (default up to `v0.8.0`) | `"sled"` | `<metadata_dir>/db/` |
+| [Sqlite](https://sqlite.org) | `"sqlite"` | `<metadata_dir>/db.sqlite` |

-Sled was supported until Garage v0.9.x, and was removed in Garage v1.0.
-You can still use an older binary of Garage (e.g. v0.9.4) to migrate
-old Sled metadata databases to another engine.
+Sled was the only database engine up to Garage v0.7.0. Performance issues and
+API limitations of Sled prompted the addition of alternative engines in v0.8.0.
+Since v0.9.0, LMDB is the default engine instead of Sled, and Sled is
+deprecated. We plan to remove Sled in Garage v1.0.

 Performance characteristics of the different DB engines are as follows:

-- LMDB: the recommended database engine for high-performance distributed clusters.
-LMDB works very well, but is known to have the following limitations:
+- Sled: tends to produce large data files and also has performance issues,
+  especially when the metadata folder is on a traditional HDD and not on SSD.
+
+- LMDB: the recommended database engine for high-performance distributed
+  clusters, much more space-efficient and significantly faster. LMDB works very
+  well, but is known to have the following limitations:

 - The data format of LMDB is not portable between architectures, so for
   instance the Garage database of an x86-64 node cannot be moved to an ARM64
@ -350,9 +310,6 @@ LMDB works very well, but is known to have the following limitations:
other nodes), or if you have saved regular snapshots at the filesystem other nodes), or if you have saved regular snapshots at the filesystem
level. level.
- Keys in LMDB are limited to 511 bytes. This limit translates to limits on
object keys in S3 and sort keys in K2V, which are limited to 479 bytes.
- Sqlite: Garage supports Sqlite as an alternative storage backend for - Sqlite: Garage supports Sqlite as an alternative storage backend for
metadata, which does not have the issues listed above for LMDB. metadata, which does not have the issues listed above for LMDB.
On versions 0.8.x and earlier, Sqlite should be avoided due to abysmal On versions 0.8.x and earlier, Sqlite should be avoided due to abysmal
@ -396,6 +353,7 @@ Here is how this option impacts the different database engines:
| Database | `metadata_fsync = false` (default) | `metadata_fsync = true` | | Database | `metadata_fsync = false` (default) | `metadata_fsync = true` |
|----------|------------------------------------|-------------------------------| |----------|------------------------------------|-------------------------------|
| Sled | default options | *unsupported* |
| Sqlite | `PRAGMA synchronous = OFF` | `PRAGMA synchronous = NORMAL` | | Sqlite | `PRAGMA synchronous = OFF` | `PRAGMA synchronous = NORMAL` |
| LMDB | `MDB_NOMETASYNC` + `MDB_NOSYNC` | `MDB_NOMETASYNC` | | LMDB | `MDB_NOMETASYNC` + `MDB_NOSYNC` | `MDB_NOMETASYNC` |
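A hedged sketch of turning the option on, trading raw write speed for metadata durability as described above:

```toml
# Favour metadata durability over write performance; under LMDB this
# keeps MDB_NOMETASYNC but drops MDB_NOSYNC, per the table above.
metadata_fsync = true
```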
@ -442,7 +400,7 @@ month, with a random delay to avoid all nodes running at the same time. When
it scrubs the data directory, Garage will read all of the data files stored on it scrubs the data directory, Garage will read all of the data files stored on
disk to check their integrity, and will rebuild any data files that it finds disk to check their integrity, and will rebuild any data files that it finds
corrupted, using the remaining valid copies stored on other nodes. corrupted, using the remaining valid copies stored on other nodes.
See [this page](@/documentation/operations/durability-repairs.md#scrub) for details. See [this page](@/documentation/operations/durability-repair.md#scrub) for details.
Set the `disable_scrub` configuration value to `true` if you don't need Garage Set the `disable_scrub` configuration value to `true` if you don't need Garage
to scrub the data directory, for instance if you are already scrubbing at the to scrub the data directory, for instance if you are already scrubbing at the
@ -451,13 +409,6 @@ you should delete it from the data directory and then call `garage repair
blocks` on the node to ensure that it re-obtains a copy from another node on blocks` on the node to ensure that it re-obtains a copy from another node on
the network. the network.
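A minimal sketch of disabling the scrub worker, assuming integrity is already checked elsewhere:

```toml
# Skip Garage's periodic scrub, e.g. because ZFS already scrubs
# the data directory at the filesystem level.
disable_scrub = true
```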
#### `use_local_tz` {#use_local_tz}
By default, Garage runs the lifecycle worker every day at midnight in UTC. Set the
`use_local_tz` configuration value to `true` if you want Garage to run the
lifecycle worker at midnight in your local timezone. If you have multiple nodes,
you should also ensure that each node has the same timezone configuration.
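A minimal sketch:

```toml
# Run the lifecycle worker at midnight local time instead of UTC.
# Make sure every node agrees on the timezone.
use_local_tz = true
```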
#### `block_size` {#block_size} #### `block_size` {#block_size}
Garage splits stored objects in consecutive chunks of size `block_size` Garage splits stored objects in consecutive chunks of size `block_size`
@ -504,6 +455,21 @@ node.
The default value is 256MiB. The default value is 256MiB.
#### `sled_cache_capacity` {#sled_cache_capacity}
This parameter can be used to tune the capacity of the cache used by
[sled](https://sled.rs), the database Garage uses internally to store metadata.
Tune this to fit the RAM you wish to make available to your Garage instance.
This value has a conservative default (128MB) so that Garage doesn't use too much
RAM by default, but feel free to increase this for higher performance.
#### `sled_flush_every_ms` {#sled_flush_every_ms}
This parameter can be used to tune the flushing interval of sled.
Increase this if sled is thrashing your SSD, at the risk of losing more data in case
of a power outage (though this should not matter much as data is replicated on other
nodes). The default value, 2000ms, should be appropriate for most use cases.
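A hedged sketch of tuning both sled parameters (the values shown are arbitrary illustrations, not recommendations):

```toml
sled_cache_capacity = 268435456  # 256 MB of cache instead of the 128 MB default
sled_flush_every_ms = 10000      # flush every 10 s instead of the 2000 ms default
```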
#### `lmdb_map_size` {#lmdb_map_size} #### `lmdb_map_size` {#lmdb_map_size}
This parameter can be used to set the map size used by LMDB, This parameter can be used to set the map size used by LMDB,
@ -578,14 +544,6 @@ RPC calls. **This parameter is optional but recommended.** In case you have
a NAT that binds the RPC port to a port that is different on your public IP, a NAT that binds the RPC port to a port that is different on your public IP,
this field might help making it work. this field might help making it work.
#### `rpc_public_addr_subnet` {#rpc_public_addr_subnet}
In case `rpc_public_addr` is not set, but autodiscovery is used, this allows
filtering the list of automatically discovered IPs to a specific subnet.
For example, if nodes should pick *their* IP inside a specific subnet, but you
don't want to explicitly write the IP down (as it's dynamic, or you want to
share configs across nodes), you can use this option.
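A minimal sketch, assuming the option takes a CIDR string (the subnet shown is hypothetical):

```toml
# On each node, keep only auto-discovered addresses inside this
# subnet, so the same config file can be shared across the cluster.
rpc_public_addr_subnet = "10.0.0.0/16"
```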
#### `bootstrap_peers` {#bootstrap_peers} #### `bootstrap_peers` {#bootstrap_peers}
A list of peer identifiers on which to contact other Garage peers of this cluster. A list of peer identifiers on which to contact other Garage peers of this cluster.
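A hedged sketch of the expected format, `<node public key>@<address>:<rpc port>` (the keys are placeholders here):

```toml
bootstrap_peers = [
    "<node1 public key>@10.0.0.11:3901",
    "<node2 public key>@10.0.0.12:3901",
]
```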

View File

@ -39,10 +39,10 @@ Read about cluster layout management [here](@/documentation/operations/layout.md
### Several replication modes ### Several replication modes
Garage supports a variety of replication modes, with configurable replica count, Garage supports a variety of replication modes, with 1 copy, 2 copies or 3 copies of your data,
and with various levels of consistency, in order to adapt to a variety of usage scenarios. and with various levels of consistency, in order to adapt to a variety of usage scenarios.
Read our reference page on [supported replication modes](@/documentation/reference-manual/configuration.md#replication_factor) Read our reference page on [supported replication modes](@/documentation/reference-manual/configuration.md#replication_mode)
to select the replication mode best suited to your use case (hint: in most cases, `replication_factor = 3` is what you want). to select the replication mode best suited to your use case (hint: in most cases, `replication_mode = "3"` is what you want).
### Compression and deduplication ### Compression and deduplication
@ -61,7 +61,7 @@ directed to a Garage cluster can be handled independently of one another instead
of going through a central bottleneck (the leader node). of going through a central bottleneck (the leader node).
As a consequence, requests can be handled much faster, even in cases where latency As a consequence, requests can be handled much faster, even in cases where latency
between cluster nodes is important (see our [benchmarks](@/documentation/design/benchmarks/index.md) for data on this). between cluster nodes is important (see our [benchmarks](@/documentation/design/benchmarks/index.md) for data on this).
This is particularly useful when nodes are far from one another and talk to one other through standard Internet connections. This is particularly usefull when nodes are far from one another and talk to one other through standard Internet connections.
### Web server for static websites ### Web server for static websites

View File

@ -392,7 +392,7 @@ table_merkle_updater_todo_queue_length{table_name="block_ref"} 0
#### `table_sync_items_received`, `table_sync_items_sent` (counters) #### `table_sync_items_received`, `table_sync_items_sent` (counters)
Number of data items sent to/received from other nodes during resync procedures Number of data items sent to/recieved from other nodes during resync procedures
``` ```
table_sync_items_received{from="<remote node>",table_name="bucket_v2"} 3 table_sync_items_received{from="<remote node>",table_name="bucket_v2"} 3

View File

@ -33,7 +33,6 @@ Feel free to open a PR to suggest fixes to this table. Minio is missing because the
| [URL path-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access) (eg. `host.tld/bucket/key`) | ✅ Implemented | ✅ | ✅ | ❓| ✅ | | [URL path-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access) (eg. `host.tld/bucket/key`) | ✅ Implemented | ✅ | ✅ | ❓| ✅ |
| [URL vhost-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#virtual-hosted-style-access) URL (eg. `bucket.host.tld/key`) | ✅ Implemented | ❌| ✅| ✅ | ✅ | | [URL vhost-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#virtual-hosted-style-access) URL (eg. `bucket.host.tld/key`) | ✅ Implemented | ❌| ✅| ✅ | ✅ |
| [Presigned URLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html) | ✅ Implemented | ❌| ✅ | ✅ | ✅(❓) | | [Presigned URLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html) | ✅ Implemented | ❌| ✅ | ✅ | ✅(❓) |
| [SSE-C encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) | ✅ Implemented | ❓ | ✅ | ❌ | ✅ |
*Note:* OpenIO does not say if it supports presigned URLs. Because it is part *Note:* OpenIO does not say if it supports presigned URLs. Because it is part
of signature v4 and they claim they support it without additional precision, of signature v4 and they claim they support it without additional precision,

View File

@ -42,7 +42,7 @@ The general principle are similar, but details have not been updated.**
A version is defined by the existence of at least one entry in the blocks table for a certain version UUID. A version is defined by the existence of at least one entry in the blocks table for a certain version UUID.
We must keep the following invariant: if a version exists in the blocks table, it has to be referenced in the objects table. We must keep the following invariant: if a version exists in the blocks table, it has to be referenced in the objects table.
We explicitly manage concurrent versions of an object: the version timestamp and version UUID columns are index columns, thus we may have several concurrent versions of an object. We explicitly manage concurrent versions of an object: the version timestamp and version UUID columns are index columns, thus we may have several concurrent versions of an object.
Important: before deleting an older version from the objects table, we must make sure that we did a successful delete of the blocks of that version from the blocks table. Important: before deleting an older version from the objects table, we must make sure that we did a successfull delete of the blocks of that version from the blocks table.
Thus, the workflow for reading an object is as follows: Thus, the workflow for reading an object is as follows:
@ -95,7 +95,7 @@ Known issue: if someone is reading from a version that we want to delete and the
Useful metadata: Useful metadata:
- list of versions that reference this block in the Cassandra table, so that we can do GC by checking in Cassandra that the lines still exist - list of versions that reference this block in the Cassandra table, so that we can do GC by checking in Cassandra that the lines still exist
- list of other nodes that we know have acknowledged a write of this block, useful in the rebalancing algorithm - list of other nodes that we know have acknowledged a write of this block, usefull in the rebalancing algorithm
Write strategy: have a single thread that does all write IO so that it is serialized (or have several threads that manage independent parts of the hash space). When writing a blob, write it to a temporary file, close, then rename so that a concurrent read gets a consistent result (either not found or found with whole content). Write strategy: have a single thread that does all write IO so that it is serialized (or have several threads that manage independent parts of the hash space). When writing a blob, write it to a temporary file, close, then rename so that a concurrent read gets a consistent result (either not found or found with whole content).
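A minimal Rust sketch of that temp-file-then-rename write strategy (illustrative only, not Garage's actual implementation):

```rust
use std::fs;
use std::io::Write;
use std::path::Path;

/// Write `data` to `dest` atomically: a concurrent reader sees either
/// the previous content (or no file at all) or the whole new content,
/// never a partially-written blob.
fn write_block_atomic(dest: &Path, data: &[u8]) -> std::io::Result<()> {
    let tmp = dest.with_extension("tmp");
    let mut f = fs::File::create(&tmp)?;
    f.write_all(data)?;
    f.sync_all()?; // make sure the bytes are on disk before publishing
    drop(f); // close the file before renaming
    // On POSIX filesystems, rename() atomically replaces `dest`.
    fs::rename(&tmp, dest)
}
```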

View File

@ -68,7 +68,7 @@ The migration steps are as follows:
5. Turn off Garage 0.3 5. Turn off Garage 0.3
6. Backup metadata folders if you can (i.e. if you have space to do it 6. Backup metadata folders if you can (i.e. if you have space to do it
somewhere). Backing up data folders could also be useful but that's much somewhere). Backing up data folders could also be usefull but that's much
harder to do. If your filesystem supports snapshots, this could be a good harder to do. If your filesystem supports snapshots, this could be a good
time to use them. time to use them.

View File

@ -1,77 +0,0 @@
+++
title = "Migrating from 0.9 to 1.0"
weight = 11
+++
**This guide explains how to migrate to 1.0 if you have an existing 0.9 cluster.
We don't recommend trying to migrate to 1.0 directly from 0.8 or older.**
This migration procedure has been tested on several clusters without issues.
However, it is still a *critical procedure* that might cause issues.
**Make sure to back up all your data before attempting it!**
You might also want to read our [general documentation on upgrading Garage](@/documentation/operations/upgrading.md).
## Changes introduced in v1.0
The following are **breaking changes** in Garage v1.0 that require your attention when migrating:
- The Sled metadata db engine has been **removed**. If your cluster was still
using Sled, you will need to **use a Garage v0.9.x binary** to convert the
database using the `garage convert-db` subcommand. See
[here](@/documentation/reference-manual/configuration.md#db_engine) for the
details of the procedure.
The following syntax changes have been made to the configuration file:
- The `replication_mode` parameter has been split into two parameters:
[`replication_factor`](@/documentation/reference-manual/configuration.md#replication_factor)
and
[`consistency_mode`](@/documentation/reference-manual/configuration.md#consistency_mode).
The old syntax using `replication_mode` is still supported for legacy
reasons and can still be used (see the sketch after this list).
- The parameters `sled_cache_capacity` and `sled_flush_every_ms` have been removed.
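A hedged sketch of the syntax change (the `consistency_mode` value shown is an assumed default; check the configuration reference for the authoritative list):

```toml
# Pre-1.0 syntax, still accepted in v1.0:
# replication_mode = "3"

# v1.0 split syntax:
replication_factor = 3
consistency_mode = "consistent"  # assumed default value
```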
## Migration procedure
The migration to Garage v1.0 can be done with almost no downtime,
by restarting all nodes at once in the new version.
The migration steps are as follows:
1. Do a `garage repair --all-nodes --yes tables`, check the logs, and verify that
all data is synced correctly between nodes. If you have time, do
additional `garage repair` procedures (`blocks`, `versions`, `block_refs`,
etc.)
2. Ensure you have a snapshot of your Garage installation that you can restore
to in case the upgrade goes wrong:
- If you are running Garage v0.9.4 or later, use the `garage meta snapshot
--all` command to make a backup snapshot of the metadata directories of
your nodes, and save a copy of the following files in the
metadata directories of your nodes: `cluster_layout`, `data_layout`,
`node_key`, `node_key.pub`.
- If you are running a filesystem such as ZFS or BTRFS that supports
snapshotting, you can create a filesystem-level snapshot to be used as a
restoration point if needed.
- In other cases, make a backup using the old procedure: turn off each node
individually; back up its metadata folder (for instance, use the following
command if your metadata directory is `/var/lib/garage/meta`: `cd
/var/lib/garage ; tar -acf meta-v0.9.tar.zst meta/`); turn it back on
again. This will allow you to take a backup of all nodes without
impacting global cluster availability. You can do all nodes of a single
zone at once as this does not impact the availability of Garage.
3. Prepare your updated binaries and configuration files for Garage v1.0
4. Shut down all v0.9 nodes simultaneously, and restart them all simultaneously
in v1.0. Use your favorite deployment tool (Ansible, Kubernetes, Nomad) to
achieve this as fast as possible. Garage v1.0 should be in a working state
as soon as enough nodes have started.
5. Monitor your cluster in the following hours to see if it works well under
your production load.

View File

@ -37,7 +37,7 @@ There are two reasons for this:
Reminder: rules of simplicity, concerning changes to Garage's source code. Reminder: rules of simplicity, concerning changes to Garage's source code.
Always question what we are doing. Always question what we are doing.
Never do anything just because it looks nice or because we "think" it might be useful at some later point but without knowing precisely why/when. Never do anything just because it looks nice or because we "think" it might be usefull at some later point but without knowing precisely why/when.
Only do things that make perfect sense in the context of what we currently know. Only do things that make perfect sense in the context of what we currently know.
## References ## References

View File

@ -69,10 +69,11 @@ Example response body:
```json ```json
{ {
"node": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df", "node": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f",
"garageVersion": "v1.0.1", "garageVersion": "git:v0.9.0-dev",
"garageFeatures": [ "garageFeatures": [
"k2v", "k2v",
"sled",
"lmdb", "lmdb",
"sqlite", "sqlite",
"metrics", "metrics",
@ -80,93 +81,84 @@ Example response body:
], ],
"rustVersion": "1.68.0", "rustVersion": "1.68.0",
"dbEngine": "LMDB (using Heed crate)", "dbEngine": "LMDB (using Heed crate)",
"layoutVersion": 5, "knownNodes": [
"nodes": [
{ {
"id": "62b218d848e86a64f7fe1909735f29a4350547b54c4b204f91246a14eb0a1a8c", "id": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f",
"role": { "addr": "10.0.0.11:3901",
"id": "62b218d848e86a64f7fe1909735f29a4350547b54c4b204f91246a14eb0a1a8c", "isUp": true,
"lastSeenSecsAgo": 9,
"hostname": "node1"
},
{
"id": "4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff",
"addr": "10.0.0.12:3901",
"isUp": true,
"lastSeenSecsAgo": 1,
"hostname": "node2"
},
{
"id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27",
"addr": "10.0.0.21:3901",
"isUp": true,
"lastSeenSecsAgo": 7,
"hostname": "node3"
},
{
"id": "e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b",
"addr": "10.0.0.22:3901",
"isUp": true,
"lastSeenSecsAgo": 1,
"hostname": "node4"
}
],
"layout": {
"version": 12,
"roles": [
{
"id": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f",
"zone": "dc1", "zone": "dc1",
"capacity": 100000000000, "capacity": 10737418240,
"tags": [] "tags": [
}, "node1"
"addr": "10.0.0.3:3901", ]
"hostname": "node3",
"isUp": true,
"lastSeenSecsAgo": 12,
"draining": false,
"dataPartition": {
"available": 660270088192,
"total": 873862266880
},
"metadataPartition": {
"available": 660270088192,
"total": 873862266880
}
}, },
{ {
"id": "a11c7cf18af297379eff8688360155fe68d9061654449ba0ce239252f5a7487f", "id": "4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff",
"role": null,
"addr": "10.0.0.2:3901",
"hostname": "node2",
"isUp": true,
"lastSeenSecsAgo": 11,
"draining": true,
"dataPartition": {
"available": 660270088192,
"total": 873862266880
},
"metadataPartition": {
"available": 660270088192,
"total": 873862266880
}
},
{
"id": "a235ac7695e0c54d7b403943025f57504d500fdcc5c3e42c71c5212faca040a2",
"role": {
"id": "a235ac7695e0c54d7b403943025f57504d500fdcc5c3e42c71c5212faca040a2",
"zone": "dc1", "zone": "dc1",
"capacity": 100000000000, "capacity": 10737418240,
"tags": [] "tags": [
}, "node2"
"addr": "127.0.0.1:3904", ]
"hostname": "lindy",
"isUp": true,
"lastSeenSecsAgo": 2,
"draining": false,
"dataPartition": {
"available": 660270088192,
"total": 873862266880
},
"metadataPartition": {
"available": 660270088192,
"total": 873862266880
}
}, },
{ {
"id": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df", "id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27",
"role": { "zone": "dc2",
"id": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df", "capacity": 10737418240,
"zone": "dc1", "tags": [
"capacity": 100000000000, "node3"
"tags": [] ]
},
"addr": "10.0.0.1:3901",
"hostname": "node1",
"isUp": true,
"lastSeenSecsAgo": 3,
"draining": false,
"dataPartition": {
"available": 660270088192,
"total": 873862266880
},
"metadataPartition": {
"available": 660270088192,
"total": 873862266880
} }
],
"stagedRoleChanges": [
{
"id": "e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b",
"remove": false,
"zone": "dc2",
"capacity": 10737418240,
"tags": [
"node4"
]
},
{
"id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27",
"remove": true,
"zone": null,
"capacity": null,
"tags": null,
} }
] ]
} }
}
``` ```
#### GetClusterHealth `GET /v1/health` #### GetClusterHealth `GET /v1/health`

View File

@ -146,7 +146,7 @@ in a bucket, as the partition key becomes the sort key in the index.
How indexing works: How indexing works:
- Each node keeps a local count of how many items it stores for each partition, - Each node keeps a local count of how many items it stores for each partition,
in a local database tree that is updated atomically when an item is modified. in a local Sled tree that is updated atomically when an item is modified.
- These local counters are asynchronously stored in the index table which is - These local counters are asynchronously stored in the index table which is
a regular Garage table spread in the network. Counters are stored as LWW values, a regular Garage table spread in the network. Counters are stored as LWW values,
so basically the final table will have the following structure: so basically the final table will have the following structure:
@ -562,7 +562,7 @@ token>", v: ["<value1>", ...] }`, with the following fields:
- in case of concurrent update and deletion, a `null` is added to the list of concurrent values - in case of concurrent update and deletion, a `null` is added to the list of concurrent values
- if the `tombstones` query parameter is set to `true`, tombstones are returned - if the `tombstones` query parameter is set to `true`, tombstones are returned
for items that have been deleted (this can be useful for inserting after an for items that have been deleted (this can be usefull for inserting after an
item that has been deleted, so that the insert is not considered item that has been deleted, so that the insert is not considered
concurrent with the delete). Tombstones are returned as tuples in the concurrent with the delete). Tombstones are returned as tuples in the
same format with only `null` values same format with only `null` values

118
flake.lock generated
View File

@ -1,27 +1,38 @@
{ {
"nodes": { "nodes": {
"crane": { "cargo2nix": {
"inputs": {
"flake-compat": [
"flake-compat"
],
"flake-utils": "flake-utils",
"nixpkgs": [
"nixpkgs"
],
"rust-overlay": "rust-overlay"
},
"locked": { "locked": {
"lastModified": 1737689766, "lastModified": 1666087781,
"narHash": "sha256-ivVXYaYlShxYoKfSo5+y5930qMKKJ8CLcAoIBPQfJ6s=", "narHash": "sha256-trKVdjMZ8mNkGfLcY5LsJJGtdV3xJDZnMVrkFjErlcs=",
"owner": "ipetkov", "owner": "Alexis211",
"repo": "crane", "repo": "cargo2nix",
"rev": "6fe74265bbb6d016d663b1091f015e2976c4a527", "rev": "a7a61179b66054904ef6a195d8da736eaaa06c36",
"type": "github" "type": "github"
}, },
"original": { "original": {
"owner": "ipetkov", "owner": "Alexis211",
"repo": "crane", "repo": "cargo2nix",
"rev": "a7a61179b66054904ef6a195d8da736eaaa06c36",
"type": "github" "type": "github"
} }
}, },
"flake-compat": { "flake-compat": {
"locked": { "locked": {
"lastModified": 1717312683, "lastModified": 1688025799,
"narHash": "sha256-FrlieJH50AuvagamEvWMIE6D2OAnERuDboFDYAED/dE=", "narHash": "sha256-ktpB4dRtnksm9F5WawoIkEneh1nrEvuxb5lJFt1iOyw=",
"owner": "nix-community", "owner": "nix-community",
"repo": "flake-compat", "repo": "flake-compat",
"rev": "38fd3954cf65ce6faf3d0d45cd26059e059f07ea", "rev": "8bf105319d44f6b9f0d764efa4fdef9f1cc9ba1c",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -35,11 +46,29 @@
"systems": "systems" "systems": "systems"
}, },
"locked": { "locked": {
"lastModified": 1731533236, "lastModified": 1681202837,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
"owner": "numtide", "owner": "numtide",
"repo": "flake-utils", "repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", "rev": "cfacdce06f30d2b68473a46042957675eebb3401",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1681202837,
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "cfacdce06f30d2b68473a46042957675eebb3401",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -50,47 +79,63 @@
}, },
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1736692550, "lastModified": 1682109806,
"narHash": "sha256-7tk8xH+g0sJkKLTJFOxphJxxOjMDFMWv24nXslaU2ro=", "narHash": "sha256-d9g7RKNShMLboTWwukM+RObDWWpHKaqTYXB48clBWXI=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "7c4869c47090dd7f9f1bdfb49a22aea026996815", "rev": "2362848adf8def2866fabbffc50462e929d7fffb",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1707091808,
"narHash": "sha256-LahKBAfGbY836gtpVNnWwBTIzN7yf/uYM/S0g393r0Y=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "9f2ee8c91ac42da3ae6c6a1d21555f283458247e",
"type": "github" "type": "github"
}, },
"original": { "original": {
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "7c4869c47090dd7f9f1bdfb49a22aea026996815", "rev": "9f2ee8c91ac42da3ae6c6a1d21555f283458247e",
"type": "github" "type": "github"
} }
}, },
"root": { "root": {
"inputs": { "inputs": {
"crane": "crane", "cargo2nix": "cargo2nix",
"flake-compat": "flake-compat", "flake-compat": "flake-compat",
"flake-utils": "flake-utils", "flake-utils": [
"nixpkgs": "nixpkgs", "cargo2nix",
"rust-overlay": "rust-overlay" "flake-utils"
],
"nixpkgs": "nixpkgs_2"
} }
}, },
"rust-overlay": { "rust-overlay": {
"inputs": { "inputs": {
"nixpkgs": [ "flake-utils": "flake-utils_2",
"nixpkgs" "nixpkgs": "nixpkgs"
]
}, },
"locked": { "locked": {
"lastModified": 1738549608, "lastModified": 1707271822,
"narHash": "sha256-GdyT9QEUSx5k/n8kILuNy83vxxdyUfJ8jL5mMpQZWfw=", "narHash": "sha256-/DZsoPH5GBzOpVEGz5PgJ7vh8Q6TcrJq5u8FcBjqAfI=",
"owner": "oxalica", "owner": "oxalica",
"repo": "rust-overlay", "repo": "rust-overlay",
"rev": "35c6f8c4352f995ecd53896200769f80a3e8f22d", "rev": "7a94fe7690d2bdfe1aab475382a505e14dc114a6",
"type": "github" "type": "github"
}, },
"original": { "original": {
"owner": "oxalica", "owner": "oxalica",
"repo": "rust-overlay", "repo": "rust-overlay",
"rev": "35c6f8c4352f995ecd53896200769f80a3e8f22d",
"type": "github" "type": "github"
} }
}, },
@ -108,6 +153,21 @@
"repo": "default", "repo": "default",
"type": "github" "type": "github"
} }
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
} }
}, },
"root": "root", "root": "root",

102
flake.nix
View File

@ -2,84 +2,88 @@
description = description =
"Garage, an S3-compatible distributed object store for self-hosted deployments"; "Garage, an S3-compatible distributed object store for self-hosted deployments";
# Nixpkgs 24.11 as of 2025-01-12 # Nixpkgs 23.11 as of 2024-02-07, has rustc v1.73
inputs.nixpkgs.url = inputs.nixpkgs.url =
"github:NixOS/nixpkgs/7c4869c47090dd7f9f1bdfb49a22aea026996815"; "github:NixOS/nixpkgs/9f2ee8c91ac42da3ae6c6a1d21555f283458247e";
# Rust overlay as of 2025-02-03
inputs.rust-overlay.url =
"github:oxalica/rust-overlay/35c6f8c4352f995ecd53896200769f80a3e8f22d";
inputs.rust-overlay.inputs.nixpkgs.follows = "nixpkgs";
inputs.crane.url = "github:ipetkov/crane";
inputs.flake-compat.url = "github:nix-community/flake-compat"; inputs.flake-compat.url = "github:nix-community/flake-compat";
inputs.flake-utils.url = "github:numtide/flake-utils";
outputs = { self, nixpkgs, flake-utils, crane, rust-overlay, ... }: inputs.cargo2nix = {
# As of 2022-10-18: two small patches over unstable branch, one for clippy and one to fix feature detection
url = "github:Alexis211/cargo2nix/a7a61179b66054904ef6a195d8da736eaaa06c36";
# As of 2023-04-25:
# - my two patches were merged into unstable (one for clippy and one to "fix" feature detection)
# - rustc v1.66
# url = "github:cargo2nix/cargo2nix/8fb57a670f7993bfc24099c33eb9c5abb51f29a2";
# Rust overlay as of 2024-02-07
inputs.rust-overlay.url =
"github:oxalica/rust-overlay/7a94fe7690d2bdfe1aab475382a505e14dc114a6";
inputs.nixpkgs.follows = "nixpkgs";
inputs.flake-compat.follows = "flake-compat";
};
inputs.flake-utils.follows = "cargo2nix/flake-utils";
outputs = { self, nixpkgs, cargo2nix, flake-utils, ... }:
let let
git_version = self.lastModifiedDate;
compile = import ./nix/compile.nix; compile = import ./nix/compile.nix;
in in
flake-utils.lib.eachDefaultSystem (system: flake-utils.lib.eachDefaultSystem (system:
let let
pkgs = nixpkgs.legacyPackages.${system}; pkgs = nixpkgs.legacyPackages.${system};
packageFor = target: release: (compile {
inherit system target nixpkgs crane rust-overlay release;
}).garage;
testWith = extraTestEnv: (compile {
inherit system nixpkgs crane rust-overlay extraTestEnv;
release = false;
}).garage-test;
in in
{ {
packages = { packages =
let
packageFor = target: (compile {
inherit system git_version target;
pkgsSrc = nixpkgs;
cargo2nixOverlay = cargo2nix.overlays.default;
release = true;
}).workspace.garage { compileMode = "build"; };
in
{
# default = native release build # default = native release build
default = packageFor null true; default = packageFor null;
# other = cross-compiled, statically-linked builds
# <arch> = cross-compiled, statically-linked release builds amd64 = packageFor "x86_64-unknown-linux-musl";
amd64 = packageFor "x86_64-unknown-linux-musl" true; i386 = packageFor "i686-unknown-linux-musl";
i386 = packageFor "i686-unknown-linux-musl" true; arm64 = packageFor "aarch64-unknown-linux-musl";
arm64 = packageFor "aarch64-unknown-linux-musl" true; arm = packageFor "armv6l-unknown-linux-musl";
arm = packageFor "armv6l-unknown-linux-musl" true;
# dev = native dev build
dev = packageFor null false;
# test = cargo test
tests = testWith {};
tests-lmdb = testWith {
GARAGE_TEST_INTEGRATION_DB_ENGINE = "lmdb";
};
tests-sqlite = testWith {
GARAGE_TEST_INTEGRATION_DB_ENGINE = "sqlite";
};
}; };
# ---- development shell, for making native builds only ---- # ---- development shell, for making native builds only ----
devShells = devShells =
let let
targets = compile { shellWithPackages = (packages: (compile {
inherit system nixpkgs crane rust-overlay; inherit system git_version;
}; pkgsSrc = nixpkgs;
cargo2nixOverlay = cargo2nix.overlays.default;
}).workspaceShell { inherit packages; });
in in
{ {
default = targets.devShell; default = shellWithPackages
(with pkgs; [
rustfmt
clang
mold
]);
# import the full shell using `nix develop .#full` # import the full shell using `nix develop .#full`
full = pkgs.mkShell { full = shellWithPackages (with pkgs; [
buildInputs = with pkgs; [ rustfmt
targets.toolchain
protobuf
clang clang
mold mold
# ---- extra packages for dev tasks ---- # ---- extra packages for dev tasks ----
rust-analyzer
cargo-audit cargo-audit
cargo-outdated cargo-outdated
cargo-machete cargo-machete
nixpkgs-fmt nixpkgs-fmt
]; ]);
};
}; };
}); });
} }

View File

@ -2,7 +2,7 @@
with import ./common.nix; with import ./common.nix;
let let
pkgs = import nixpkgs { }; pkgs = import pkgsSrc { };
lib = pkgs.lib; lib = pkgs.lib;
/* Converts a key list and a value list to a set /* Converts a key list and a value list to a set

View File

@ -10,9 +10,9 @@ let
flake = (import flake-compat { system = builtins.currentSystem; src = ../.; }); flake = (import flake-compat { system = builtins.currentSystem; src = ../.; });
in in
rec {
{ pkgsSrc = flake.defaultNix.inputs.nixpkgs;
flake = flake.defaultNix; cargo2nix = flake.defaultNix.inputs.cargo2nix;
nixpkgs = flake.defaultNix.inputs.nixpkgs; cargo2nixOverlay = cargo2nix.overlays.default;
devShells = flake.defaultNix.devShells.${builtins.currentSystem}; devShells = builtins.getAttr builtins.currentSystem flake.defaultNix.devShells;
} }

View File

@ -1,64 +1,164 @@
{ { system, target ? null, pkgsSrc, cargo2nixOverlay, compiler ? "rustc"
/* build inputs */ , release ? false, git_version ? null, features ? null, }:
nixpkgs,
crane,
rust-overlay,
/* parameters */
system,
git_version ? null,
target ? null,
release ? false,
features ? null,
extraTestEnv ? {}
}:
let let
log = v: builtins.trace v v; log = v: builtins.trace v v;
# NixOS and Rust/Cargo triples do not match for ARM, fix it here.
rustTarget = if target == "armv6l-unknown-linux-musleabihf" then
"arm-unknown-linux-musleabihf"
else
target;
rustTargetEnvMap = {
"x86_64-unknown-linux-musl" = "X86_64_UNKNOWN_LINUX_MUSL";
"aarch64-unknown-linux-musl" = "AARCH64_UNKNOWN_LINUX_MUSL";
"i686-unknown-linux-musl" = "I686_UNKNOWN_LINUX_MUSL";
"arm-unknown-linux-musleabihf" = "ARM_UNKNOWN_LINUX_MUSLEABIHF";
};
pkgsNative = import nixpkgs {
inherit system;
overlays = [ (import rust-overlay) ];
};
pkgs = if target != null then pkgs = if target != null then
import nixpkgs { import pkgsSrc {
inherit system; inherit system;
crossSystem = { crossSystem = {
config = target; config = target;
isStatic = true; isStatic = true;
}; };
overlays = [ (import rust-overlay) ]; overlays = [ cargo2nixOverlay ];
} }
else else
pkgsNative; import pkgsSrc {
inherit system;
overlays = [ cargo2nixOverlay ];
};
inherit (pkgs) lib stdenv; toolchainOptions = {
rustVersion = "1.73.0";
extraRustComponents = [ "clippy" ];
};
toolchainFn = (p: p.rust-bin.stable."1.82.0".default.override { buildEnv = (drv:
targets = lib.optionals (target != null) [ rustTarget ]; {
extensions = [ rustc = drv.setBuildEnv;
"rust-src" clippy = ''
"rustfmt" ${drv.setBuildEnv or ""}
echo
echo --- BUILDING WITH CLIPPY ---
echo
export NIX_RUST_BUILD_FLAGS="''${NIX_RUST_BUILD_FLAGS} --deny warnings"
export RUSTC="''${CLIPPY_DRIVER}"
'';
}.${compiler});
/* Cargo2nix provides many overrides by default, you can take inspiration from them:
https://github.com/cargo2nix/cargo2nix/blob/master/overlay/overrides.nix
You can have a complete list of the available options by looking at the overridden object, mkcrate:
https://github.com/cargo2nix/cargo2nix/blob/master/overlay/mkcrate.nix
*/
packageOverrides = pkgs:
pkgs.rustBuilder.overrides.all ++ [
/* [1] We add some logic to compile our crates with clippy, it provides us many additional lints
[2] We need to alter Nix hardening to make static binaries: PIE,
Position Independent Executables seems to be supported only on amd64. Having
this flag set either 1. makes our executables crash or 2. compiles them as dynamic on some platforms.
Here, we deactivate it. Later (find `codegenOpts`), we reactivate it for supported targets
(only amd64 currently) through the `-static-pie` flag.
PIE is a feature used by ASLR, which helps mitigate security issues.
Learn more about Nix Hardening at: https://github.com/NixOS/nixpkgs/blob/master/pkgs/build-support/cc-wrapper/add-hardening.sh
[3] We want to inject the git version while keeping the build deterministic.
As we do not want to consider the .git folder as part of the input source,
we ask the user (the CI often) to pass the value to Nix.
[4] We don't want libsodium-sys and zstd-sys to try to use pkgconfig to build against a system library.
However the features to do so get activated for some reason (due to a bug in cargo2nix?),
so disable them manually here.
*/
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage";
overrideAttrs = drv:
(if git_version != null then {
# [3]
preConfigure = ''
${drv.preConfigure or ""}
export GIT_VERSION="${git_version}"
'';
} else
{ }) // {
# [1]
setBuildEnv = (buildEnv drv);
# [2]
hardeningDisable = [ "pie" ];
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_rpc";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_db";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_util";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_table";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_block";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_model";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_api";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "garage_web";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "k2v-client";
overrideAttrs = drv: { # [1]
setBuildEnv = (buildEnv drv);
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "libsodium-sys";
overrideArgs = old: {
features = [ ]; # [4]
};
})
(pkgs.rustBuilder.rustLib.makeOverride {
name = "zstd-sys";
overrideArgs = old: {
features = [ ]; # [4]
};
})
]; ];
});
craneLib = (crane.mkLib pkgs).overrideToolchain toolchainFn;
src = craneLib.cleanCargoSource ../.;
/* We ship some parts of the code disabled by default by putting them behind a flag. /* We ship some parts of the code disabled by default by putting them behind a flag.
It speeds up the compilation (when the feature is not required) and released crates have less dependency by default (less attack surface, disk space, etc.). It speeds up the compilation (when the feature is not required) and released crates have less dependency by default (less attack surface, disk space, etc.).
@ -68,15 +168,16 @@ let
rootFeatures = if features != null then rootFeatures = if features != null then
features features
else else
([ "bundled-libs" "lmdb" "sqlite" "k2v" ] ++ (lib.optionals release [ ([ "garage/bundled-libs" "garage/sled" "garage/lmdb" "garage/sqlite" "garage/k2v" ] ++ (if release then [
"consul-discovery" "garage/consul-discovery"
"kubernetes-discovery" "garage/kubernetes-discovery"
"metrics" "garage/metrics"
"telemetry-otlp" "garage/telemetry-otlp"
"syslog" "garage/syslog"
])); ] else
[ ]));
featuresStr = lib.concatStringsSep "," rootFeatures; packageFun = import ../Cargo.nix;
/* We compile fully static binaries with musl to simplify deployment on most systems. /* We compile fully static binaries with musl to simplify deployment on most systems.
When possible, we reactivate PIE hardening (see above). When possible, we reactivate PIE hardening (see above).
@ -87,9 +188,12 @@ let
For more information on static builds, please refer to Rust's RFC 1721. For more information on static builds, please refer to Rust's RFC 1721.
https://rust-lang.github.io/rfcs/1721-crt-static.html#specifying-dynamicstatic-c-runtime-linkage https://rust-lang.github.io/rfcs/1721-crt-static.html#specifying-dynamicstatic-c-runtime-linkage
*/ */
codegenOptsMap = {
"x86_64-unknown-linux-musl" = codegenOpts = {
[ "target-feature=+crt-static" "link-arg=-static-pie" ]; "armv6l-unknown-linux-musleabihf" = [
"target-feature=+crt-static"
"link-arg=-static"
]; # compile as dynamic with static-pie
"aarch64-unknown-linux-musl" = [ "aarch64-unknown-linux-musl" = [
"target-feature=+crt-static" "target-feature=+crt-static"
"link-arg=-static" "link-arg=-static"
@ -98,95 +202,17 @@ let
"target-feature=+crt-static" "target-feature=+crt-static"
"link-arg=-static" "link-arg=-static"
]; # segfault with static-pie ]; # segfault with static-pie
"armv6l-unknown-linux-musleabihf" = [ "x86_64-unknown-linux-musl" =
"target-feature=+crt-static" [ "target-feature=+crt-static" "link-arg=-static-pie" ];
"link-arg=-static"
]; # compile as dynamic with static-pie
}; };
codegenOpts = if target != null then codegenOptsMap.${target} else [ # NixOS and Rust/Cargo triples do not match for ARM, fix it here.
"link-arg=-fuse-ld=mold" rustTarget = if target == "armv6l-unknown-linux-musleabihf" then
]; "arm-unknown-linux-musleabihf"
else
target;
commonArgs = in pkgs.rustBuilder.makePackageSet ({
{ inherit release packageFun packageOverrides codegenOpts rootFeatures;
inherit src; target = rustTarget;
pname = "garage"; } // toolchainOptions)
version = "dev";
strictDeps = true;
cargoExtraArgs = "--locked --features ${featuresStr}";
cargoTestExtraArgs = "--workspace";
nativeBuildInputs = [
pkgsNative.protobuf
pkgs.stdenv.cc
] ++ lib.optionals (target == null) [
pkgs.clang
pkgs.mold
];
CARGO_PROFILE = if release then "release" else "dev";
CARGO_BUILD_RUSTFLAGS =
lib.concatStringsSep
" "
(builtins.map (flag: "-C ${flag}") codegenOpts);
}
//
(if rustTarget != null then {
CARGO_BUILD_TARGET = rustTarget;
"CARGO_TARGET_${rustTargetEnvMap.${rustTarget}}_LINKER" = "${stdenv.cc.targetPrefix}cc";
HOST_CC = "${stdenv.cc.nativePrefix}cc";
TARGET_CC = "${stdenv.cc.targetPrefix}cc";
} else {
CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER = "clang";
});
in rec {
toolchain = toolchainFn pkgs;
devShell = pkgs.mkShell {
buildInputs = [
toolchain
] ++ (with pkgs; [
protobuf
clang
mold
]);
};
# ---- building garage ----
garage-deps = craneLib.buildDepsOnly commonArgs;
garage = craneLib.buildPackage (commonArgs // {
cargoArtifacts = garage-deps;
doCheck = false;
} //
(if git_version != null then {
version = git_version;
GIT_VERSION = git_version;
} else {}));
# ---- testing garage ----
garage-test-bin = craneLib.cargoBuild (commonArgs // {
cargoArtifacts = garage-deps;
pname = "garage-tests";
CARGO_PROFILE = "test";
cargoExtraArgs = "${commonArgs.cargoExtraArgs} --tests --workspace";
doCheck = false;
});
garage-test = craneLib.cargoTest (commonArgs // {
cargoArtifacts = garage-test-bin;
nativeBuildInputs = commonArgs.nativeBuildInputs ++ [
pkgs.cacert
];
} // extraTestEnv);
}

View File

@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes # This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version. # to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/) # Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.6.0 version: 0.4.2
# This is the version number of the application being deployed. This version number should be # This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to # incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using. # follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes. # It is recommended to use it with quotes.
appVersion: "v1.0.1" appVersion: "v0.9.4"

View File

@ -1,86 +0,0 @@
# garage
![Version: 0.6.0](https://img.shields.io/badge/Version-0.6.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.0.1](https://img.shields.io/badge/AppVersion-v1.0.1-informational?style=flat-square)
S3-compatible object store for small self-hosted geo-distributed deployments
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | |
| deployment.kind | string | `"StatefulSet"` | Switchable to DaemonSet |
| deployment.podManagementPolicy | string | `"OrderedReady"` | If using statefulset, allow Parallel or OrderedReady (default) |
| deployment.replicaCount | int | `3` | Number of StatefulSet replicas/garage nodes to start |
| environment | object | `{}` | |
| extraVolumeMounts | object | `{}` | |
| extraVolumes | object | `{}` | |
| fullnameOverride | string | `""` | |
| garage.blockSize | string | `"1048576"` | Default is 1MB. An increase can result in better performance in certain scenarios: https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size |
| garage.bootstrapPeers | list | `[]` | This is not required if you use the integrated kubernetes discovery |
| garage.compressionLevel | string | `"1"` | zstd compression level of stored blocks https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression-level |
| garage.dbEngine | string | `"lmdb"` | Can be changed for better performance on certain systems https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0 |
| garage.existingConfigMap | string | `""` | if not empty string, allow using an existing ConfigMap for the garage.toml, if set, ignores garage.toml |
| garage.garageTomlString | string | `""` | String Template for the garage configuration if set, ignores above values. Values can be templated, see https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/ |
| garage.kubernetesSkipCrd | bool | `false` | Set to true if you want to use k8s discovery but install the CRDs manually outside of the helm chart, for example if you operate at namespace level without cluster resources |
| garage.replicationMode | string | `"3"` | Default to 3 replicas, see the replication_mode section at https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode |
| garage.rpcBindAddr | string | `"[::]:3901"` | |
| garage.rpcSecret | string | `""` | If not given, a random secret will be generated and stored in a Secret object |
| garage.s3.api.region | string | `"garage"` | |
| garage.s3.api.rootDomain | string | `".s3.garage.tld"` | |
| garage.s3.web.index | string | `"index.html"` | |
| garage.s3.web.rootDomain | string | `".web.garage.tld"` | |
| image.pullPolicy | string | `"IfNotPresent"` | |
| image.repository | string | `"dxflrs/amd64_garage"` | default to amd64 docker image |
| image.tag | string | `""` | set the image tag, please prefer using the chart version and not this to avoid compatibility issues |
| imagePullSecrets | list | `[]` | set if you need credentials to pull your custom image |
| ingress.s3.api.annotations | object | `{}` | Rely _either_ on the className or the annotation below but not both! If you want to use the className, set className: "nginx" and replace "nginx" by an Ingress controller name, examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers). |
| ingress.s3.api.enabled | bool | `false` | |
| ingress.s3.api.hosts[0] | object | `{"host":"s3.garage.tld","paths":[{"path":"/","pathType":"Prefix"}]}` | garage S3 API endpoint, to be used with awscli for example |
| ingress.s3.api.hosts[1] | object | `{"host":"*.s3.garage.tld","paths":[{"path":"/","pathType":"Prefix"}]}` | garage S3 API endpoint, DNS style bucket access |
| ingress.s3.api.labels | object | `{}` | |
| ingress.s3.api.tls | list | `[]` | |
| ingress.s3.web.annotations | object | `{}` | Rely _either_ on the className or the annotation below but not both! If you want to use the className, set className: "nginx" and replace "nginx" by an Ingress controller name, examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers). |
| ingress.s3.web.enabled | bool | `false` | |
| ingress.s3.web.hosts[0] | object | `{"host":"*.web.garage.tld","paths":[{"path":"/","pathType":"Prefix"}]}` | wildcard website access with bucket name prefix |
| ingress.s3.web.hosts[1] | object | `{"host":"mywebpage.example.com","paths":[{"path":"/","pathType":"Prefix"}]}` | specific bucket access with FQDN bucket |
| ingress.s3.web.labels | object | `{}` | |
| ingress.s3.web.tls | list | `[]` | |
| initImage.pullPolicy | string | `"IfNotPresent"` | |
| initImage.repository | string | `"busybox"` | |
| initImage.tag | string | `"stable"` | |
| monitoring.metrics.enabled | bool | `false` | If true, a service for monitoring is created with a prometheus.io/scrape annotation |
| monitoring.metrics.serviceMonitor.enabled | bool | `false` | If true, a ServiceMonitor CRD is created for a prometheus operator https://github.com/coreos/prometheus-operator |
| monitoring.metrics.serviceMonitor.interval | string | `"15s"` | |
| monitoring.metrics.serviceMonitor.labels | object | `{}` | |
| monitoring.metrics.serviceMonitor.path | string | `"/metrics"` | |
| monitoring.metrics.serviceMonitor.relabelings | list | `[]` | |
| monitoring.metrics.serviceMonitor.scheme | string | `"http"` | |
| monitoring.metrics.serviceMonitor.scrapeTimeout | string | `"10s"` | |
| monitoring.metrics.serviceMonitor.tlsConfig | object | `{}` | |
| monitoring.tracing.sink | string | `""` | specify a sink endpoint for OpenTelemetry Traces, eg. `http://localhost:4317` |
| nameOverride | string | `""` | |
| nodeSelector | object | `{}` | |
| persistence.data.hostPath | string | `"/var/lib/garage/data"` | |
| persistence.data.size | string | `"100Mi"` | |
| persistence.enabled | bool | `true` | |
| persistence.meta.hostPath | string | `"/var/lib/garage/meta"` | |
| persistence.meta.size | string | `"100Mi"` | |
| podAnnotations | object | `{}` | additional pod annotations |
| podSecurityContext.fsGroup | int | `1000` | |
| podSecurityContext.runAsGroup | int | `1000` | |
| podSecurityContext.runAsNonRoot | bool | `true` | |
| podSecurityContext.runAsUser | int | `1000` | |
| resources | object | `{}` | |
| securityContext.capabilities | object | `{"drop":["ALL"]}` | The default security context is heavily restricted, feel free to tune it to your requirements |
| securityContext.readOnlyRootFilesystem | bool | `true` | |
| service.s3.api.port | int | `3900` | |
| service.s3.web.port | int | `3902` | |
| service.type | string | `"ClusterIP"` | You can rely on any service to expose your cluster - ClusterIP (+ Ingress) - NodePort (+ Ingress) - LoadBalancer |
| serviceAccount.annotations | object | `{}` | Annotations to add to the service account |
| serviceAccount.create | bool | `true` | Specifies whether a service account should be created |
| serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template |
| tolerations | list | `[]` | |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)

View File

@ -1,49 +1,7 @@
{{- if not .Values.garage.existingConfigMap }}
apiVersion: v1 apiVersion: v1
kind: ConfigMap kind: ConfigMap
metadata: metadata:
name: {{ include "garage.fullname" . }}-config name: {{ include "garage.fullname" . }}-config
data: data:
garage.toml: |- garage.toml: |-
{{- if .Values.garage.garageTomlString }} {{- tpl (index (index .Values.garage) "garage.toml") $ | nindent 4 }}
{{- tpl (index (index .Values.garage) "garageTomlString") $ | nindent 4 }}
{{- else }}
metadata_dir = "/mnt/meta"
data_dir = "/mnt/data"
db_engine = "{{ .Values.garage.dbEngine }}"
block_size = {{ .Values.garage.blockSize }}
replication_mode = "{{ .Values.garage.replicationMode }}"
compression_level = {{ .Values.garage.compressionLevel }}
rpc_bind_addr = "{{ .Values.garage.rpcBindAddr }}"
# rpc_secret will be populated by the init container from a k8s secret object
rpc_secret = "__RPC_SECRET_REPLACE__"
bootstrap_peers = {{ .Values.garage.bootstrapPeers }}
[kubernetes_discovery]
namespace = "{{ .Release.Namespace }}"
service_name = "{{ include "garage.fullname" . }}"
skip_crd = {{ .Values.garage.kubernetesSkipCrd }}
[s3_api]
s3_region = "{{ .Values.garage.s3.api.region }}"
api_bind_addr = "[::]:3900"
root_domain = "{{ .Values.garage.s3.api.rootDomain }}"
[s3_web]
bind_addr = "[::]:3902"
root_domain = "{{ .Values.garage.s3.web.rootDomain }}"
index = "{{ .Values.garage.s3.web.index }}"
[admin]
api_bind_addr = "[::]:3903"
{{- if .Values.monitoring.tracing.sink }}
trace_sink = "{{ .Values.monitoring.tracing.sink }}"
{{- end }}
{{- end }}
{{- end }}

View File

@ -11,7 +11,6 @@ spec:
{{- if eq .Values.deployment.kind "StatefulSet" }} {{- if eq .Values.deployment.kind "StatefulSet" }}
replicas: {{ .Values.deployment.replicaCount }} replicas: {{ .Values.deployment.replicaCount }}
serviceName: {{ include "garage.fullname" . }} serviceName: {{ include "garage.fullname" . }}
podManagementPolicy: {{ .Values.deployment.podManagementPolicy }}
{{- end }} {{- end }}
template: template:
metadata: metadata:
@ -64,10 +63,6 @@ spec:
name: web-api name: web-api
- containerPort: 3903 - containerPort: 3903
name: admin name: admin
{{- with .Values.environment }}
env:
{{- toYaml . | nindent 12 }}
{{- end }}
volumeMounts: volumeMounts:
- name: meta - name: meta
mountPath: /mnt/meta mountPath: /mnt/meta
@ -76,9 +71,6 @@ spec:
- name: etc - name: etc
mountPath: /etc/garage.toml mountPath: /etc/garage.toml
subPath: garage.toml subPath: garage.toml
{{- with .Values.extraVolumeMounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
# TODO # TODO
# livenessProbe: # livenessProbe:
# httpGet: # httpGet:
@ -113,9 +105,6 @@ spec:
- name: data - name: data
emptyDir: {} emptyDir: {}
{{- end }} {{- end }}
{{- with .Values.extraVolumes }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }} {{- with .Values.nodeSelector }}
nodeSelector: nodeSelector:
{{- toYaml . | nindent 8 }} {{- toYaml . | nindent 8 }}

View File

@ -4,30 +4,33 @@
# Garage configuration. These values go to garage.toml # Garage configuration. These values go to garage.toml
garage: garage:
# -- Can be changed for better performance on certain systems # Can be changed for better performance on certain systems
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0 # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0
dbEngine: "lmdb" dbEngine: "sled"
# -- Default is 1MB # Default is 1MB
# An increase can result in better performance in certain scenarios # An increase can result in better performance in certain scenarios
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#block-size
blockSize: "1048576" blockSize: "1048576"
# -- Default to 3 replicas, see the replication_mode section at # Tuning parameters for the sled DB engine
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#sled-cache-capacity
sledCacheCapacity: "134217728"
sledFlushEveryMs: "2000"
# Default to 3 replicas, see the replication_mode section at
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode
replicationMode: "3" replicationMode: "3"
# -- zstd compression level of stored blocks # zstd compression level of stored blocks
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression-level # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#compression-level
compressionLevel: "1" compressionLevel: "1"
rpcBindAddr: "[::]:3901" rpcBindAddr: "[::]:3901"
# -- If not given, a random secret will be generated and stored in a Secret object # If not given, a random secret will be generated and stored in a Secret object
rpcSecret: "" rpcSecret: ""
# -- This is not required if you use the integrated kubernetes discovery # This is not required if you use the integrated kubernetes discovery
bootstrapPeers: [] bootstrapPeers: []
# -- Set to true if you want to use k8s discovery but install the CRDs manually outside
# of the helm chart, for example if you operate at namespace level without cluster resources
kubernetesSkipCrd: false kubernetesSkipCrd: false
s3: s3:
api: api:
@ -36,16 +39,52 @@ garage:
web: web:
rootDomain: ".web.garage.tld" rootDomain: ".web.garage.tld"
index: "index.html" index: "index.html"
# Template for the garage configuration
# Values can be templated
# ref: https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/
garage.toml: |-
metadata_dir = "/mnt/meta"
data_dir = "/mnt/data"
# -- if not empty string, allow using an existing ConfigMap for the garage.toml, db_engine = "{{ .Values.garage.dbEngine }}"
# if set, ignores garage.toml
existingConfigMap: ""
# -- String Template for the garage configuration block_size = {{ .Values.garage.blockSize }}
# if set, ignores above values.
# Values can be templated, {{- if eq .Values.garage.dbEngine "sled"}}
# see https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/ sled_cache_capacity = {{ .Values.garage.sledCacheCapacity }}
garageTomlString: "" sled_flush_every_ms = {{ .Values.garage.sledFlushEveryMs }}
{{- end }}
replication_mode = "{{ .Values.garage.replicationMode }}"
compression_level = {{ .Values.garage.compressionLevel }}
rpc_bind_addr = "{{ .Values.garage.rpcBindAddr }}"
# rpc_secret will be populated by the init container from a k8s secret object
rpc_secret = "__RPC_SECRET_REPLACE__"
bootstrap_peers = {{ .Values.garage.bootstrapPeers }}
[kubernetes_discovery]
namespace = "{{ .Release.Namespace }}"
service_name = "{{ include "garage.fullname" . }}"
skip_crd = {{ .Values.garage.kubernetesSkipCrd }}
[s3_api]
s3_region = "{{ .Values.garage.s3.api.region }}"
api_bind_addr = "[::]:3900"
root_domain = "{{ .Values.garage.s3.api.rootDomain }}"
[s3_web]
bind_addr = "[::]:3902"
root_domain = "{{ .Values.garage.s3.web.rootDomain }}"
index = "{{ .Values.garage.s3.web.index }}"
[admin]
api_bind_addr = "[::]:3903"
{{- if .Values.monitoring.tracing.sink }}
trace_sink = "{{ .Values.monitoring.tracing.sink }}"
{{- end }}
# Data persistence # Data persistence
persistence: persistence:
@ -63,18 +102,14 @@ persistence:
# Deployment configuration # Deployment configuration
deployment: deployment:
# -- Switchable to DaemonSet # Switchable to DaemonSet
kind: StatefulSet kind: StatefulSet
# -- Number of StatefulSet replicas/garage nodes to start # Number of StatefulSet replicas/garage nodes to start
replicaCount: 3 replicaCount: 3
# -- If using statefulset, allow Parallel or OrderedReady (default)
podManagementPolicy: OrderedReady
image: image:
# -- default to amd64 docker image
repository: dxflrs/amd64_garage repository: dxflrs/amd64_garage
# -- set the image tag, please prefer using the chart version and not this # please prefer using the chart version and not this tag
# to avoid compatibility issues
tag: "" tag: ""
pullPolicy: IfNotPresent pullPolicy: IfNotPresent
@ -83,21 +118,19 @@ initImage:
tag: stable tag: stable
pullPolicy: IfNotPresent pullPolicy: IfNotPresent
# -- set if you need credentials to pull your custom image
imagePullSecrets: [] imagePullSecrets: []
nameOverride: "" nameOverride: ""
fullnameOverride: "" fullnameOverride: ""
serviceAccount: serviceAccount:
# -- Specifies whether a service account should be created # Specifies whether a service account should be created
create: true create: true
# -- Annotations to add to the service account # Annotations to add to the service account
annotations: {} annotations: {}
# -- The name of the service account to use. # The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template # If not set and create is true, a name is generated using the fullname template
name: "" name: ""
# -- additonal pod annotations
podAnnotations: {} podAnnotations: {}
podSecurityContext: podSecurityContext:
@ -107,7 +140,7 @@ podSecurityContext:
runAsNonRoot: true runAsNonRoot: true
securityContext: securityContext:
# -- The default security context is heavily restricted, # The default security context is heavily restricted
# feel free to tune it to your requirements # feel free to tune it to your requirements
capabilities: capabilities:
drop: drop:
@ -115,7 +148,7 @@ securityContext:
readOnlyRootFilesystem: true readOnlyRootFilesystem: true
service: service:
# -- You can rely on any service to expose your cluster # You can rely on any service to expose your cluster
# - ClusterIP (+ Ingress) # - ClusterIP (+ Ingress)
# - NodePort (+ Ingress) # - NodePort (+ Ingress)
# - LoadBalancer # - LoadBalancer
@ -131,23 +164,20 @@ ingress:
s3: s3:
api: api:
enabled: false enabled: false
# -- Rely _either_ on the className or the annotation below but not both! # Rely either on the className or the annotation below but not both
# If you want to use the className, set # replace "nginx" by an Ingress controller
# you can find examples here https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
# className: "nginx" # className: "nginx"
# and replace "nginx" by an Ingress controller name,
# examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers).
annotations: {} annotations: {}
# kubernetes.io/ingress.class: "nginx" # kubernetes.io/ingress.class: "nginx"
# kubernetes.io/tls-acme: "true" # kubernetes.io/tls-acme: "true"
labels: {} labels: {}
hosts: hosts:
# -- garage S3 API endpoint, to be used with awscli for example - host: "s3.garage.tld" # garage S3 API endpoint
- host: "s3.garage.tld"
paths: paths:
- path: / - path: /
pathType: Prefix pathType: Prefix
# -- garage S3 API endpoint, DNS style bucket access - host: "*.s3.garage.tld" # garage S3 API endpoint, DNS style bucket access
- host: "*.s3.garage.tld"
paths: paths:
- path: / - path: /
pathType: Prefix pathType: Prefix
@ -157,23 +187,20 @@ ingress:
# - kubernetes.docker.internal # - kubernetes.docker.internal
web: web:
enabled: false enabled: false
# -- Rely _either_ on the className or the annotation below but not both! # Rely either on the className or the annotation below but not both
# If you want to use the className, set # replace "nginx" by an Ingress controller
# you can find examples here https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
# className: "nginx" # className: "nginx"
# and replace "nginx" by an Ingress controller name,
# examples [here](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers).
annotations: {} annotations: {}
# kubernetes.io/ingress.class: nginx # kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true" # kubernetes.io/tls-acme: "true"
labels: {} labels: {}
hosts: hosts:
# -- wildcard website access with bucket name prefix - host: "*.web.garage.tld" # wildcard website access with bucket name prefix
- host: "*.web.garage.tld"
paths: paths:
- path: / - path: /
pathType: Prefix pathType: Prefix
# -- specific bucket access with FQDN bucket - host: "mywebpage.example.com" # specific bucket access with FQDN bucket
- host: "mywebpage.example.com"
paths: paths:
- path: / - path: /
pathType: Prefix pathType: Prefix
@ -197,18 +224,12 @@ tolerations: []
affinity: {} affinity: {}
environment: {}
extraVolumes: {}
extraVolumeMounts: {}
monitoring: monitoring:
metrics: metrics:
# -- If true, a service for monitoring is created with a prometheus.io/scrape annotation # If true, a service for monitoring is created with a prometheus.io/scrape annotation
enabled: false enabled: false
serviceMonitor: serviceMonitor:
# -- If true, a ServiceMonitor CRD is created for a prometheus operator # If true, a ServiceMonitor CRD is created for a prometheus operator
# https://github.com/coreos/prometheus-operator # https://github.com/coreos/prometheus-operator
enabled: false enabled: false
path: /metrics path: /metrics
@ -220,5 +241,4 @@ monitoring:
scrapeTimeout: 10s scrapeTimeout: 10s
relabelings: [] relabelings: []
tracing: tracing:
# -- specify a sink endpoint for OpenTelemetry Traces, eg. `http://localhost:4317`
sink: "" sink: ""
View File
@@ -7,12 +7,7 @@ if [ "$#" -ne 1 ]; then
exit 2
fi
-if [ ! -x "$1" ]; then
-echo "[fail] $1 does not exist or is not an executable"
-exit 1
-fi
-if file "$1" | grep 'dynamically linked' 2>&1; then
+if file $1 | grep 'dynamically linked' 2>&1; then
echo "[fail] $1 is dynamic"
exit 1
fi
View File
@@ -82,19 +82,6 @@ if [ -z "$SKIP_AWS" ]; then
exit 1
fi
aws s3api delete-object --bucket eprouvette --key upload
-echo "🛠️ Test SSE-C with awscli (aws s3)"
-SSEC_KEY="u8zCfnEyt5Imo/krN+sxA1DQXxLWtPJavU6T6gOVj1Y="
-SSEC_KEY_MD5="jMGbs3GyZkYjJUP6q5jA7g=="
-echo "$SSEC_KEY" | base64 -d > /tmp/garage.ssec-key
-for idx in {1,2}.rnd; do
-aws s3 cp --sse-c AES256 --sse-c-key fileb:///tmp/garage.ssec-key \
-"/tmp/garage.$idx" "s3://eprouvette/garage.$idx.aws.sse-c"
-aws s3 cp --sse-c AES256 --sse-c-key fileb:///tmp/garage.ssec-key \
-"s3://eprouvette/garage.$idx.aws.sse-c" "/tmp/garage.$idx.dl.sse-c"
-diff "/tmp/garage.$idx" "/tmp/garage.$idx.dl.sse-c"
-aws s3api delete-object --bucket eprouvette --key "garage.$idx.aws.sse-c"
-done
fi
# S3CMD
View File
@@ -3,7 +3,7 @@
with import ./nix/common.nix;
let
-pkgs = import nixpkgs {
+pkgs = import pkgsSrc {
inherit system;
};
winscp = (import ./nix/winscp.nix) pkgs;
@@ -11,7 +11,6 @@ in
{
# --- Dev shell inherited from flake.nix ---
devShell = devShells.default;
-devShellFull = devShells.full;
# --- Continuous integration shell ---
# The shell used for all CI jobs (along with devShell)
@@ -39,7 +38,7 @@ in
--endpoint-url https://garage.deuxfleurs.fr \
--region garage \
s3 cp \
-./result/bin/garage \
+./result-bin/bin/garage \
s3://garagehq.deuxfleurs.fr/_releases/''${CI_COMMIT_TAG:-$CI_COMMIT_SHA}/''${TARGET}/garage
}
@@ -115,7 +114,7 @@ in
shellHook = ''
function refresh_cache {
pass show deuxfleurs/nix_priv_key > /tmp/nix-signing-key.sec
-for attr in pkgs.amd64.debug test.amd64 pkgs.{amd64,i386,arm,arm64}.release; do
+for attr in clippy.amd64 test.amd64 pkgs.{amd64,i386,arm,arm64}.release; do
echo "Updating cache for ''${attr}"
nix copy -j8 \
--to 's3://nix?endpoint=garage.deuxfleurs.fr&region=garage&secret-key=/tmp/nix-signing-key.sec' \
View File
@@ -1,6 +1,6 @@
[package]
-name = "garage_api_s3"
-version = "1.0.1"
+name = "garage_api"
+version = "0.9.4"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@@ -20,27 +20,27 @@ garage_block.workspace = true
garage_net.workspace = true
garage_util.workspace = true
garage_rpc.workspace = true
-garage_api_common.workspace = true
-aes-gcm.workspace = true
-async-compression.workspace = true
+argon2.workspace = true
+async-trait.workspace = true
base64.workspace = true
bytes.workspace = true
chrono.workspace = true
-crc32fast.workspace = true
-crc32c.workspace = true
+crypto-common.workspace = true
err-derive.workspace = true
hex.workspace = true
+hmac.workspace = true
+idna.workspace = true
tracing.workspace = true
md-5.workspace = true
+nom.workspace = true
pin-project.workspace = true
-sha1.workspace = true
sha2.workspace = true
futures.workspace = true
-futures-util.workspace = true
tokio.workspace = true
tokio-stream.workspace = true
-tokio-util.workspace = true
form_urlencoded.workspace = true
http.workspace = true
@@ -48,13 +48,21 @@ httpdate.workspace = true
http-range.workspace = true
http-body-util.workspace = true
hyper = { workspace = true, default-features = false, features = ["server", "http1"] }
+hyper-util.workspace = true
multer.workspace = true
percent-encoding.workspace = true
roxmltree.workspace = true
url.workspace = true
serde.workspace = true
-serde_bytes.workspace = true
serde_json.workspace = true
quick-xml.workspace = true
opentelemetry.workspace = true
+opentelemetry-prometheus = { workspace = true, optional = true }
+prometheus = { workspace = true, optional = true }
+[features]
+k2v = [ "garage_util/k2v", "garage_model/k2v" ]
+metrics = [ "opentelemetry-prometheus", "prometheus" ]
View File
@@ -1,43 +0,0 @@
[package]
name = "garage_api_admin"
version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
description = "Admin API server crate for the Garage object store"
repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
readme = "../../README.md"
[lib]
path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
garage_model.workspace = true
garage_table.workspace = true
garage_util.workspace = true
garage_rpc.workspace = true
garage_api_common.workspace = true
argon2.workspace = true
async-trait.workspace = true
err-derive.workspace = true
hex.workspace = true
tracing.workspace = true
futures.workspace = true
tokio.workspace = true
http.workspace = true
hyper = { workspace = true, default-features = false, features = ["server", "http1"] }
url.workspace = true
serde.workspace = true
serde_json.workspace = true
opentelemetry.workspace = true
opentelemetry-prometheus = { workspace = true, optional = true }
prometheus = { workspace = true, optional = true }
[features]
metrics = [ "opentelemetry-prometheus", "prometheus" ]
View File
@@ -2,6 +2,7 @@ use std::collections::HashMap;
use std::sync::Arc;
use argon2::password_hash::PasswordHash;
+use async_trait::async_trait;
use http::header::{ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW};
use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
@@ -19,15 +20,15 @@ use garage_rpc::system::ClusterHealthStatus;
use garage_util::error::Error as GarageError;
use garage_util::socket_address::UnixOrTCPSocketAddress;
-use garage_api_common::generic_server::*;
-use garage_api_common::helpers::*;
+use crate::generic_server::*;
-use crate::bucket::*;
-use crate::cluster::*;
-use crate::error::*;
-use crate::key::*;
-use crate::router_v0;
-use crate::router_v1::{Authorization, Endpoint};
+use crate::admin::bucket::*;
+use crate::admin::cluster::*;
+use crate::admin::error::*;
+use crate::admin::key::*;
+use crate::admin::router_v0;
+use crate::admin::router_v1::{Authorization, Endpoint};
+use crate::helpers::*;
pub type ResBody = BoxBody<Error>;
@@ -220,6 +221,7 @@ impl AdminApiServer {
}
}
+#[async_trait]
impl ApiHandler for AdminApiServer {
const API_NAME: &'static str = "admin";
const API_NAME_DISPLAY: &'static str = "Admin";
@@ -274,7 +276,7 @@ impl ApiHandler for AdminApiServer {
Endpoint::GetClusterLayout => handle_get_cluster_layout(&self.garage).await,
Endpoint::UpdateClusterLayout => handle_update_cluster_layout(&self.garage, req).await,
Endpoint::ApplyClusterLayout => handle_apply_cluster_layout(&self.garage, req).await,
-Endpoint::RevertClusterLayout => handle_revert_cluster_layout(&self.garage).await,
+Endpoint::RevertClusterLayout => handle_revert_cluster_layout(&self.garage, req).await,
// Keys
Endpoint::ListKeys => handle_list_keys(&self.garage).await,
Endpoint::GetKeyInfo {
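The #[async_trait] attributes that reappear on the 0.9.x side exist because that branch predates stable async fn in traits; main instead declares handler methods returning a return-position impl Future with an explicit Send bound. A minimal contrast, as a hedged sketch with invented trait names:

use std::future::Future;

// 0.9.x style: the async-trait proc macro desugars `async fn` into a boxed future.
#[async_trait::async_trait]
trait HandlerOld {
    async fn handle(&self, req: String) -> String;
}

// main style (Rust >= 1.75): no proc macro; the `Send` bound is spelled out,
// mirroring `fn handle(...) -> impl Future<Output = ...> + Send` in this diff.
trait HandlerNew {
    fn handle(&self, req: String) -> impl Future<Output = String> + Send;
}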
View File
@@ -17,12 +17,11 @@ use garage_model::permission::*;
use garage_model::s3::mpu_table;
use garage_model::s3::object_table::*;
-use garage_api_common::common_error::CommonError;
-use garage_api_common::helpers::*;
-use crate::api_server::ResBody;
-use crate::error::*;
-use crate::key::ApiBucketKeyPerm;
+use crate::admin::api_server::ResBody;
+use crate::admin::error::*;
+use crate::admin::key::ApiBucketKeyPerm;
+use crate::common_error::CommonError;
+use crate::helpers::*;
pub async fn handle_list_buckets(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
let buckets = garage
@@ -124,7 +123,7 @@ async fn bucket_info_results(
.table
.get(&bucket_id, &EmptyKey)
.await?
-.map(|x| x.filtered_values(&garage.system.cluster_layout()))
+.map(|x| x.filtered_values(&garage.system.ring.borrow()))
.unwrap_or_default();
let mpu_counters = garage
@@ -132,7 +131,7 @@ async fn bucket_info_results(
.table
.get(&bucket_id, &EmptyKey)
.await?
-.map(|x| x.filtered_values(&garage.system.cluster_layout()))
+.map(|x| x.filtered_values(&garage.system.ring.borrow()))
.unwrap_or_default();
let mut relevant_keys = HashMap::new();
View File
@@ -1,4 +1,3 @@
-use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::Arc;
@@ -12,105 +11,30 @@ use garage_rpc::layout;
use garage_model::garage::Garage;
-use garage_api_common::helpers::{json_ok_response, parse_json_body};
-use crate::api_server::ResBody;
-use crate::error::*;
+use crate::admin::api_server::ResBody;
+use crate::admin::error::*;
+use crate::helpers::{json_ok_response, parse_json_body};
pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
-let layout = garage.system.cluster_layout();
-let mut nodes = garage
-.system
-.get_known_nodes()
-.into_iter()
-.map(|i| {
-(
-i.id,
-NodeResp {
-id: hex::encode(i.id),
-addr: i.addr,
-hostname: i.status.hostname,
-is_up: i.is_up,
-last_seen_secs_ago: i.last_seen_secs_ago,
-data_partition: i
-.status
-.data_disk_avail
-.map(|(avail, total)| FreeSpaceResp {
-available: avail,
-total,
-}),
-metadata_partition: i.status.meta_disk_avail.map(|(avail, total)| {
-FreeSpaceResp {
-available: avail,
-total,
-}
-}),
-..Default::default()
-},
-)
-})
-.collect::<HashMap<_, _>>();
-for (id, _, role) in layout.current().roles.items().iter() {
-if let layout::NodeRoleV(Some(r)) = role {
-let role = NodeRoleResp {
-id: hex::encode(id),
-zone: r.zone.to_string(),
-capacity: r.capacity,
-tags: r.tags.clone(),
-};
-match nodes.get_mut(id) {
-None => {
-nodes.insert(
-*id,
-NodeResp {
-id: hex::encode(id),
-role: Some(role),
-..Default::default()
-},
-);
-}
-Some(n) => {
-n.role = Some(role);
-}
-}
-}
-}
-for ver in layout.versions().iter().rev().skip(1) {
-for (id, _, role) in ver.roles.items().iter() {
-if let layout::NodeRoleV(Some(r)) = role {
-if r.capacity.is_some() {
-if let Some(n) = nodes.get_mut(id) {
-if n.role.is_none() {
-n.draining = true;
-}
-} else {
-nodes.insert(
-*id,
-NodeResp {
-id: hex::encode(id),
-draining: true,
-..Default::default()
-},
-);
-}
-}
-}
-}
-}
-let mut nodes = nodes.into_values().collect::<Vec<_>>();
-nodes.sort_by(|x, y| x.id.cmp(&y.id));
let res = GetClusterStatusResponse {
node: hex::encode(garage.system.id),
garage_version: garage_util::version::garage_version(),
garage_features: garage_util::version::garage_features(),
rust_version: garage_util::version::rust_version(),
db_engine: garage.db.engine(),
-layout_version: layout.current().version,
-nodes,
+known_nodes: garage
+.system
+.get_known_nodes()
+.into_iter()
+.map(|i| KnownNodeResp {
+id: hex::encode(i.id),
+addr: i.addr,
+is_up: i.is_up,
+last_seen_secs_ago: i.last_seen_secs_ago,
+hostname: i.status.hostname,
+})
+.collect(),
+layout: format_cluster_layout(&garage.system.get_cluster_layout()),
};
Ok(json_ok_response(&res)?)
@@ -161,14 +85,13 @@ pub async fn handle_connect_cluster_nodes(
}
pub async fn handle_get_cluster_layout(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
-let res = format_cluster_layout(garage.system.cluster_layout().inner());
+let res = format_cluster_layout(&garage.system.get_cluster_layout());
Ok(json_ok_response(&res)?)
}
-fn format_cluster_layout(layout: &layout::LayoutHistory) -> GetClusterLayoutResponse {
+fn format_cluster_layout(layout: &layout::ClusterLayout) -> GetClusterLayoutResponse {
let roles = layout
-.current()
.roles
.items()
.iter()
@@ -182,12 +105,10 @@ fn format_cluster_layout(layout: &layout::LayoutHistory) -> GetClusterLayoutResp
.collect::<Vec<_>>();
let staged_role_changes = layout
-.staging
-.get()
-.roles
+.staging_roles
.items()
.iter()
-.filter(|(k, _, v)| layout.current().roles.get(k) != Some(v))
+.filter(|(k, _, v)| layout.roles.get(k) != Some(v))
.map(|(k, _, v)| match &v.0 {
None => NodeRoleChange {
id: hex::encode(k),
@@ -205,7 +126,7 @@ fn format_cluster_layout(layout: &layout::LayoutHistory) -> GetClusterLayoutResp
.collect::<Vec<_>>();
GetClusterLayoutResponse {
-version: layout.current().version,
+version: layout.version,
roles,
staged_role_changes,
}
@@ -234,8 +155,8 @@ struct GetClusterStatusResponse {
garage_features: Option<&'static [&'static str]>,
rust_version: &'static str,
db_engine: String,
-layout_version: u64,
-nodes: Vec<NodeResp>,
+known_nodes: Vec<KnownNodeResp>,
+layout: GetClusterLayoutResponse,
}
#[derive(Serialize)]
@@ -269,27 +190,14 @@ struct NodeRoleResp {
tags: Vec<String>,
}
-#[derive(Serialize, Default)]
+#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
-struct FreeSpaceResp {
-available: u64,
-total: u64,
-}
-#[derive(Serialize, Default)]
-#[serde(rename_all = "camelCase")]
-struct NodeResp {
+struct KnownNodeResp {
id: String,
-role: Option<NodeRoleResp>,
-addr: Option<SocketAddr>,
-hostname: Option<String>,
+addr: SocketAddr,
is_up: bool,
last_seen_secs_ago: Option<u64>,
-draining: bool,
-#[serde(skip_serializing_if = "Option::is_none")]
-data_partition: Option<FreeSpaceResp>,
-#[serde(skip_serializing_if = "Option::is_none")]
-metadata_partition: Option<FreeSpaceResp>,
+hostname: String,
}
// ---- update functions ----
@@ -300,10 +208,10 @@ pub async fn handle_update_cluster_layout(
) -> Result<Response<ResBody>, Error> {
let updates = parse_json_body::<UpdateClusterLayoutRequest, _, Error>(req).await?;
-let mut layout = garage.system.cluster_layout().inner().clone();
-let mut roles = layout.current().roles.clone();
-roles.merge(&layout.staging.get().roles);
+let mut layout = garage.system.get_cluster_layout();
+let mut roles = layout.roles.clone();
+roles.merge(&layout.staging_roles);
for change in updates {
let node = hex::decode(&change.id).ok_or_bad_request("Invalid node identifier")?;
@@ -324,17 +232,11 @@ pub async fn handle_update_cluster_layout(
};
layout
-.staging
-.get_mut()
-.roles
+.staging_roles
.merge(&roles.update_mutator(node, layout::NodeRoleV(new_role)));
}
-garage
-.system
-.layout_manager
-.update_cluster_layout(&layout)
-.await?;
+garage.system.update_cluster_layout(&layout).await?;
let res = format_cluster_layout(&layout);
Ok(json_ok_response(&res)?)
@@ -344,16 +246,12 @@ pub async fn handle_apply_cluster_layout(
garage: &Arc<Garage>,
req: Request<IncomingBody>,
) -> Result<Response<ResBody>, Error> {
-let param = parse_json_body::<ApplyLayoutRequest, _, Error>(req).await?;
+let param = parse_json_body::<ApplyRevertLayoutRequest, _, Error>(req).await?;
-let layout = garage.system.cluster_layout().inner().clone();
+let layout = garage.system.get_cluster_layout();
let (layout, msg) = layout.apply_staged_changes(Some(param.version))?;
-garage
-.system
-.layout_manager
-.update_cluster_layout(&layout)
-.await?;
+garage.system.update_cluster_layout(&layout).await?;
let res = ApplyClusterLayoutResponse {
message: msg,
@@ -364,14 +262,13 @@ pub async fn handle_apply_cluster_layout(
pub async fn handle_revert_cluster_layout(
garage: &Arc<Garage>,
+req: Request<IncomingBody>,
) -> Result<Response<ResBody>, Error> {
-let layout = garage.system.cluster_layout().inner().clone();
-let layout = layout.revert_staged_changes()?;
-garage
-.system
-.layout_manager
-.update_cluster_layout(&layout)
-.await?;
+let param = parse_json_body::<ApplyRevertLayoutRequest, _, Error>(req).await?;
+let layout = garage.system.get_cluster_layout();
+let layout = layout.revert_staged_changes(Some(param.version))?;
+garage.system.update_cluster_layout(&layout).await?;
let res = format_cluster_layout(&layout);
Ok(json_ok_response(&res)?)
@@ -383,7 +280,7 @@ type UpdateClusterLayoutRequest = Vec<NodeRoleChange>;
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
-struct ApplyLayoutRequest {
+struct ApplyRevertLayoutRequest {
version: u64,
}
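Both request shapes deserialize the same way: a JSON object whose version field pins the layout version being applied (or, on 0.9.x, reverted). A minimal serde round-trip, assuming only the serde and serde_json crates; the camelCase rename is a no-op for a single-word field like version but matters for multi-word response fields such as stagedRoleChanges.

use serde::Deserialize;

// Mirrors the struct above: `#[serde(rename_all = "camelCase")]` maps JSON
// keys onto the Rust fields.
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct ApplyLayoutRequest {
    version: u64,
}

fn main() {
    let req: ApplyLayoutRequest = serde_json::from_str(r#"{ "version": 2 }"#).unwrap();
    assert_eq!(req.version, 2);
}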
View File
@@ -1,24 +1,20 @@
-use std::convert::TryFrom;
use err_derive::Error;
use hyper::header::HeaderValue;
use hyper::{HeaderMap, StatusCode};
pub use garage_model::helper::error::Error as HelperError;
-use garage_api_common::common_error::{commonErrorDerivative, CommonError};
-pub use garage_api_common::common_error::{
-CommonErrorDerivative, OkOrBadRequest, OkOrInternalError,
-};
-use garage_api_common::generic_server::ApiError;
-use garage_api_common::helpers::*;
+use crate::common_error::CommonError;
+pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
+use crate::generic_server::ApiError;
+use crate::helpers::*;
/// Errors of this crate
#[derive(Debug, Error)]
pub enum Error {
#[error(display = "{}", _0)]
/// Error from common error
-Common(#[error(source)] CommonError),
+Common(CommonError),
// Category: cannot process
/// The API access key does not exist
@@ -33,20 +29,16 @@ pub enum Error {
KeyAlreadyExists(String),
}
-commonErrorDerivative!(Error);
+impl<T> From<T> for Error
+where
+CommonError: From<T>,
+{
+fn from(err: T) -> Self {
+Error::Common(CommonError::from(err))
+}
+}
-/// FIXME: helper errors are transformed into their corresponding variants
-/// in the Error struct, but in many cases a helper error should be considered
-/// an internal error.
-impl From<HelperError> for Error {
-fn from(err: HelperError) -> Error {
-match CommonError::try_from(err) {
-Ok(ce) => Self::Common(ce),
-Err(HelperError::NoSuchAccessKey(k)) => Self::NoSuchAccessKey(k),
-Err(_) => unreachable!(),
-}
-}
-}
+impl CommonErrorDerivative for Error {}
impl Error {
fn code(&self) -> &'static str {
View File
@@ -9,10 +9,9 @@ use garage_table::*;
use garage_model::garage::Garage;
use garage_model::key_table::*;
-use garage_api_common::helpers::*;
-use crate::api_server::ResBody;
-use crate::error::*;
+use crate::admin::api_server::ResBody;
+use crate::admin::error::*;
+use crate::helpers::*;
pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
let res = garage
View File
@@ -1,6 +1,3 @@
-#[macro_use]
-extern crate tracing;
pub mod api_server;
mod error;
mod router_v0;
View File
@@ -2,9 +2,8 @@ use std::borrow::Cow;
use hyper::{Method, Request};
-use garage_api_common::router_macros::*;
-use crate::error::*;
+use crate::admin::error::*;
+use crate::router_macros::*;
router_match! {@func
View File
@@ -2,10 +2,9 @@ use std::borrow::Cow;
use hyper::{Method, Request};
-use garage_api_common::router_macros::*;
-use crate::error::*;
-use crate::router_v0;
+use crate::admin::error::*;
+use crate::admin::router_v0;
+use crate::router_macros::*;
pub enum Authorization {
None,
View File
@@ -1,44 +0,0 @@
[package]
name = "garage_api_common"
version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
description = "Common functions for the API server crates for the Garage object store"
repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
readme = "../../README.md"
[lib]
path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
garage_model.workspace = true
garage_table.workspace = true
garage_util.workspace = true
bytes.workspace = true
chrono.workspace = true
crypto-common.workspace = true
err-derive.workspace = true
hex.workspace = true
hmac.workspace = true
idna.workspace = true
tracing.workspace = true
nom.workspace = true
pin-project.workspace = true
sha2.workspace = true
futures.workspace = true
tokio.workspace = true
http.workspace = true
http-body-util.workspace = true
hyper = { workspace = true, default-features = false, features = ["server", "http1"] }
hyper-util.workspace = true
url.workspace = true
serde.workspace = true
serde_json.workspace = true
opentelemetry.workspace = true
View File
@@ -1,170 +0,0 @@
use std::sync::Arc;
use http::header::{
ACCESS_CONTROL_ALLOW_HEADERS, ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN,
ACCESS_CONTROL_EXPOSE_HEADERS, ACCESS_CONTROL_REQUEST_HEADERS, ACCESS_CONTROL_REQUEST_METHOD,
};
use hyper::{body::Body, body::Incoming as IncomingBody, Request, Response, StatusCode};
use garage_model::bucket_table::{BucketParams, CorsRule as GarageCorsRule};
use garage_model::garage::Garage;
use crate::common_error::{
helper_error_as_internal, CommonError, OkOrBadRequest, OkOrInternalError,
};
use crate::helpers::*;
pub fn find_matching_cors_rule<'a>(
bucket_params: &'a BucketParams,
req: &Request<impl Body>,
) -> Result<Option<&'a GarageCorsRule>, CommonError> {
if let Some(cors_config) = bucket_params.cors_config.get() {
if let Some(origin) = req.headers().get("Origin") {
let origin = origin.to_str()?;
let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) {
Some(h) => h.to_str()?.split(',').map(|h| h.trim()).collect::<Vec<_>>(),
None => vec![],
};
return Ok(cors_config.iter().find(|rule| {
cors_rule_matches(rule, origin, req.method().as_ref(), request_headers.iter())
}));
}
}
Ok(None)
}
pub fn cors_rule_matches<'a, HI, S>(
rule: &GarageCorsRule,
origin: &'a str,
method: &'a str,
mut request_headers: HI,
) -> bool
where
HI: Iterator<Item = S>,
S: AsRef<str>,
{
rule.allow_origins.iter().any(|x| x == "*" || x == origin)
&& rule.allow_methods.iter().any(|x| x == "*" || x == method)
&& request_headers.all(|h| {
rule.allow_headers
.iter()
.any(|x| x == "*" || x == h.as_ref())
})
}
pub fn add_cors_headers(
resp: &mut Response<impl Body>,
rule: &GarageCorsRule,
) -> Result<(), http::header::InvalidHeaderValue> {
let h = resp.headers_mut();
h.insert(
ACCESS_CONTROL_ALLOW_ORIGIN,
rule.allow_origins.join(", ").parse()?,
);
h.insert(
ACCESS_CONTROL_ALLOW_METHODS,
rule.allow_methods.join(", ").parse()?,
);
h.insert(
ACCESS_CONTROL_ALLOW_HEADERS,
rule.allow_headers.join(", ").parse()?,
);
h.insert(
ACCESS_CONTROL_EXPOSE_HEADERS,
rule.expose_headers.join(", ").parse()?,
);
Ok(())
}
pub async fn handle_options_api(
garage: Arc<Garage>,
req: &Request<IncomingBody>,
bucket_name: Option<String>,
) -> Result<Response<EmptyBody>, CommonError> {
// FIXME: CORS rules of buckets with local aliases are
// not taken into account.
// If the bucket name is a global bucket name,
// we try to apply the CORS rules of that bucket.
// If a user has a local bucket name that has
// the same name, its CORS rules won't be applied
// and will be shadowed by the rules of the globally
// existing bucket (but this is inevitable because
// OPTIONS calls are not authenticated).
if let Some(bn) = bucket_name {
let helper = garage.bucket_helper();
let bucket_id = helper
.resolve_global_bucket_name(&bn)
.await
.map_err(helper_error_as_internal)?;
if let Some(id) = bucket_id {
let bucket = garage
.bucket_helper()
.get_existing_bucket(id)
.await
.map_err(helper_error_as_internal)?;
let bucket_params = bucket.state.into_option().unwrap();
handle_options_for_bucket(req, &bucket_params)
} else {
// If there is a bucket name in the request, but that name
// does not correspond to a global alias for a bucket,
// then it's either a non-existing bucket or a local bucket.
// We have no way of knowing, because the request is not
// authenticated and thus we can't resolve local aliases.
// We take the permissive approach of allowing everything,
// because we don't want to prevent web apps that use
// local bucket names from making API calls.
Ok(Response::builder()
.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
.header(ACCESS_CONTROL_ALLOW_METHODS, "*")
.status(StatusCode::OK)
.body(EmptyBody::new())?)
}
} else {
// If there is no bucket name in the request,
// we are doing a ListBuckets call, which we want to allow
// for all origins.
Ok(Response::builder()
.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
.header(ACCESS_CONTROL_ALLOW_METHODS, "GET")
.status(StatusCode::OK)
.body(EmptyBody::new())?)
}
}
pub fn handle_options_for_bucket(
req: &Request<IncomingBody>,
bucket_params: &BucketParams,
) -> Result<Response<EmptyBody>, CommonError> {
let origin = req
.headers()
.get("Origin")
.ok_or_bad_request("Missing Origin header")?
.to_str()?;
let request_method = req
.headers()
.get(ACCESS_CONTROL_REQUEST_METHOD)
.ok_or_bad_request("Missing Access-Control-Request-Method header")?
.to_str()?;
let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) {
Some(h) => h.to_str()?.split(',').map(|h| h.trim()).collect::<Vec<_>>(),
None => vec![],
};
if let Some(cors_config) = bucket_params.cors_config.get() {
let matching_rule = cors_config
.iter()
.find(|rule| cors_rule_matches(rule, origin, request_method, request_headers.iter()));
if let Some(rule) = matching_rule {
let mut resp = Response::builder()
.status(StatusCode::OK)
.body(EmptyBody::new())?;
add_cors_headers(&mut resp, rule).ok_or_internal_error("Invalid CORS configuration")?;
return Ok(resp);
}
}
Err(CommonError::Forbidden(
"This CORS request is not allowed.".into(),
))
}
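A self-contained restatement of the matching rule implemented by cors_rule_matches above, with a toy struct standing in for garage_model's CorsRule (the real type has more fields); this sketch only demonstrates the any-origin/any-method/all-headers logic:

// Toy rule type for illustration; not garage_model's CorsRule.
struct Rule {
    allow_origins: Vec<String>,
    allow_methods: Vec<String>,
    allow_headers: Vec<String>,
}

fn matches(rule: &Rule, origin: &str, method: &str, request_headers: &[&str]) -> bool {
    rule.allow_origins.iter().any(|x| x == "*" || x == origin)
        && rule.allow_methods.iter().any(|x| x == "*" || x == method)
        // every requested header must be covered by an allowed header (or "*")
        && request_headers
            .iter()
            .all(|h| rule.allow_headers.iter().any(|x| x == "*" || x == *h))
}

fn main() {
    let rule = Rule {
        allow_origins: vec!["*".into()],
        allow_methods: vec!["GET".into(), "PUT".into()],
        allow_headers: vec!["content-type".into()],
    };
    assert!(matches(&rule, "https://app.example", "PUT", &["content-type"]));
    assert!(!matches(&rule, "https://app.example", "DELETE", &[]));
}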
View File
@@ -1,12 +0,0 @@
//! Crate for serving a S3 compatible API
#[macro_use]
extern crate tracing;
pub mod common_error;
pub mod cors;
pub mod encoding;
pub mod generic_server;
pub mod helpers;
pub mod router_macros;
pub mod signature;
View File
@@ -1,5 +1,3 @@
-use std::convert::TryFrom;
use err_derive::Error;
use hyper::StatusCode;
@@ -57,40 +55,13 @@ pub enum CommonError {
InvalidBucketName(String),
}
-#[macro_export]
-macro_rules! commonErrorDerivative {
-( $error_struct: ident ) => {
-impl From<garage_util::error::Error> for $error_struct {
-fn from(err: garage_util::error::Error) -> Self {
-Self::Common(CommonError::InternalError(err))
-}
-}
-impl From<http::Error> for $error_struct {
-fn from(err: http::Error) -> Self {
-Self::Common(CommonError::Http(err))
-}
-}
-impl From<hyper::Error> for $error_struct {
-fn from(err: hyper::Error) -> Self {
-Self::Common(CommonError::Hyper(err))
-}
-}
-impl From<hyper::header::ToStrError> for $error_struct {
-fn from(err: hyper::header::ToStrError) -> Self {
-Self::Common(CommonError::InvalidHeader(err))
-}
-}
-impl CommonErrorDerivative for $error_struct {}
-};
-}
-pub use commonErrorDerivative;
impl CommonError {
pub fn http_status_code(&self) -> StatusCode {
match self {
CommonError::InternalError(
-GarageError::Timeout | GarageError::RemoteError(_) | GarageError::Quorum(..),
+GarageError::Timeout
+| GarageError::RemoteError(_)
+| GarageError::Quorum(_, _, _, _),
) => StatusCode::SERVICE_UNAVAILABLE,
CommonError::InternalError(_) | CommonError::Hyper(_) | CommonError::Http(_) => {
StatusCode::INTERNAL_SERVER_ERROR
@@ -109,7 +80,9 @@ impl CommonError {
match self {
CommonError::Forbidden(_) => "AccessDenied",
CommonError::InternalError(
-GarageError::Timeout | GarageError::RemoteError(_) | GarageError::Quorum(..),
+GarageError::Timeout
+| GarageError::RemoteError(_)
+| GarageError::Quorum(_, _, _, _),
) => "ServiceUnavailable",
CommonError::InternalError(_) | CommonError::Hyper(_) | CommonError::Http(_) => {
"InternalError"
@@ -128,39 +101,18 @@ impl CommonError {
}
}
-impl TryFrom<HelperError> for CommonError {
-type Error = HelperError;
-fn try_from(err: HelperError) -> Result<Self, HelperError> {
+impl From<HelperError> for CommonError {
+fn from(err: HelperError) -> Self {
match err {
-HelperError::Internal(i) => Ok(Self::InternalError(i)),
-HelperError::BadRequest(b) => Ok(Self::BadRequest(b)),
-HelperError::InvalidBucketName(n) => Ok(Self::InvalidBucketName(n)),
-HelperError::NoSuchBucket(n) => Ok(Self::NoSuchBucket(n)),
-e => Err(e),
+HelperError::Internal(i) => Self::InternalError(i),
+HelperError::BadRequest(b) => Self::BadRequest(b),
+HelperError::InvalidBucketName(n) => Self::InvalidBucketName(n),
+HelperError::NoSuchBucket(n) => Self::NoSuchBucket(n),
+e => Self::bad_request(format!("{}", e)),
}
}
}
-/// This function converts HelperErrors into CommonErrors,
-/// for variants that exist in CommonError.
-/// This is used for helper functions that might return InvalidBucketName
-/// or NoSuchBucket for instance, and we want to pass that error
-/// up to our caller.
-pub fn pass_helper_error(err: HelperError) -> CommonError {
-match CommonError::try_from(err) {
-Ok(e) => e,
-Err(e) => panic!("Helper error `{}` should not have happened here", e),
-}
-}
-pub fn helper_error_as_internal(err: HelperError) -> CommonError {
-match err {
-HelperError::Internal(e) => CommonError::InternalError(e),
-e => CommonError::InternalError(GarageError::Message(e.to_string())),
-}
-}
pub trait CommonErrorDerivative: From<CommonError> {
fn internal_error<M: ToString>(msg: M) -> Self {
Self::from(CommonError::InternalError(GarageError::Message(
View File
@@ -2,7 +2,8 @@ use std::convert::Infallible;
use std::fs::{self, Permissions};
use std::os::unix::fs::PermissionsExt;
use std::sync::Arc;
-use std::time::Duration;
+use async_trait::async_trait;
use futures::future::Future;
use futures::stream::{futures_unordered::FuturesUnordered, StreamExt};
@@ -18,7 +19,6 @@ use hyper_util::rt::TokioIo;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio::net::{TcpListener, TcpStream, UnixListener, UnixStream};
use tokio::sync::watch;
-use tokio::time::{sleep_until, Instant};
use opentelemetry::{
global,
@@ -34,7 +34,7 @@ use garage_util::socket_address::UnixOrTCPSocketAddress;
use crate::helpers::{BoxBody, ErrorBody};
-pub trait ApiEndpoint: Send + Sync + 'static {
+pub(crate) trait ApiEndpoint: Send + Sync + 'static {
fn name(&self) -> &'static str;
fn add_span_attributes(&self, span: SpanRef<'_>);
}
@@ -45,7 +45,8 @@ pub trait ApiError: std::error::Error + Send + Sync + 'static {
fn http_body(&self, garage_region: &str, path: &str) -> ErrorBody;
}
-pub trait ApiHandler: Send + Sync + 'static {
+#[async_trait]
+pub(crate) trait ApiHandler: Send + Sync + 'static {
const API_NAME: &'static str;
const API_NAME_DISPLAY: &'static str;
@@ -53,14 +54,14 @@ pub trait ApiHandler: Send + Sync + 'static {
type Error: ApiError;
fn parse_endpoint(&self, r: &Request<IncomingBody>) -> Result<Self::Endpoint, Self::Error>;
-fn handle(
+async fn handle(
&self,
req: Request<IncomingBody>,
endpoint: Self::Endpoint,
-) -> impl Future<Output = Result<Response<BoxBody<Self::Error>>, Self::Error>> + Send;
+) -> Result<Response<BoxBody<Self::Error>>, Self::Error>;
}
-pub struct ApiServer<A: ApiHandler> {
+pub(crate) struct ApiServer<A: ApiHandler> {
region: String,
api_handler: A,
@@ -245,11 +246,13 @@ impl<A: ApiHandler> ApiServer<A> {
// ==== helper functions ====
+#[async_trait]
pub trait Accept: Send + Sync + 'static {
type Stream: AsyncRead + AsyncWrite + Send + Sync + 'static;
-fn accept(&self) -> impl Future<Output = std::io::Result<(Self::Stream, String)>> + Send;
+async fn accept(&self) -> std::io::Result<(Self::Stream, String)>;
}
+#[async_trait]
impl Accept for TcpListener {
type Stream = TcpStream;
async fn accept(&self) -> std::io::Result<(Self::Stream, String)> {
@@ -261,6 +264,7 @@ impl Accept for TcpListener {
pub struct UnixListenerOn(pub UnixListener, pub String);
+#[async_trait]
impl Accept for UnixListenerOn {
type Stream = UnixStream;
async fn accept(&self) -> std::io::Result<(Self::Stream, String)> {
@@ -287,7 +291,7 @@ where
let connection_collector = tokio::spawn({
let server_name = server_name.clone();
async move {
-let mut connections = FuturesUnordered::<tokio::task::JoinHandle<()>>::new();
+let mut connections = FuturesUnordered::new();
loop {
let collect_next = async {
if connections.is_empty() {
@@ -308,34 +312,23 @@ where
}
}
}
-let deadline = Instant::now() + Duration::from_secs(10);
-while !connections.is_empty() {
-info!(
-"{} server: {} connections still open, deadline in {:.2}s",
-server_name,
-connections.len(),
-(deadline - Instant::now()).as_secs_f32(),
-);
-tokio::select! {
-conn_res = connections.next() => {
-trace!(
-"{} server: HTTP connection finished: {:?}",
-server_name,
-conn_res.unwrap(),
-);
-}
-_ = sleep_until(deadline) => {
-warn!("{} server: exit deadline reached with {} connections still open, killing them now",
-server_name,
-connections.len());
-for conn in connections.iter() {
-conn.abort();
-}
-for conn in connections {
-assert!(conn.await.unwrap_err().is_cancelled());
-}
-break;
-}
-}
-}
+if !connections.is_empty() {
+info!(
+"{} server: {} connections still open",
+server_name,
+connections.len()
+);
+while let Some(conn_res) = connections.next().await {
+trace!(
+"{} server: HTTP connection finished: {:?}",
+server_name,
+conn_res
+);
+info!(
+"{} server: {} connections still open",
+server_name,
+connections.len()
+);
+}
}
}
}
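Distilled from the removed (main-branch) version above: shutdown waits for in-flight connections but bounds the wait with a deadline via tokio::select!. A standalone sketch assuming only the tokio and futures crates, not the exact server code; unlike the original, it does not await the aborted handles afterwards.

use std::time::Duration;
use futures::stream::{FuturesUnordered, StreamExt};
use tokio::time::{sleep_until, Instant};

// Wait for in-flight tasks, but never longer than 10 seconds; whatever is
// still running at the deadline gets aborted.
async fn drain(mut connections: FuturesUnordered<tokio::task::JoinHandle<()>>) {
    let deadline = Instant::now() + Duration::from_secs(10);
    while !connections.is_empty() {
        tokio::select! {
            _ = connections.next() => {} // one task finished on its own
            _ = sleep_until(deadline) => {
                for conn in connections.iter() {
                    conn.abort();
                }
                break;
            }
        }
    }
}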
View File
@@ -363,9 +363,9 @@ mod tests {
}
#[derive(Serialize)]
-pub struct CustomApiErrorBody {
-pub code: String,
-pub message: String,
-pub region: String,
-pub path: String,
+pub(crate) struct CustomApiErrorBody {
+pub(crate) code: String,
+pub(crate) message: String,
+pub(crate) region: String,
+pub(crate) path: String,
}
View File
@@ -1,37 +0,0 @@
[package]
name = "garage_api_k2v"
version = "1.0.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
description = "K2V API server crate for the Garage object store"
repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
readme = "../../README.md"
[lib]
path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
garage_model = { workspace = true, features = [ "k2v" ] }
garage_table.workspace = true
garage_util = { workspace = true, features = [ "k2v" ] }
garage_api_common.workspace = true
base64.workspace = true
err-derive.workspace = true
tracing.workspace = true
futures.workspace = true
tokio.workspace = true
http.workspace = true
http-body-util.workspace = true
hyper = { workspace = true, default-features = false, features = ["server", "http1"] }
percent-encoding.workspace = true
url.workspace = true
serde.workspace = true
serde_json.workspace = true
opentelemetry.workspace = true
View File
@@ -1,5 +1,7 @@
use std::sync::Arc;
+use async_trait::async_trait;
use hyper::{body::Incoming as IncomingBody, Method, Request, Response};
use tokio::sync::watch;
@@ -10,25 +12,26 @@ use garage_util::socket_address::UnixOrTCPSocketAddress;
use garage_model::garage::Garage;
-use garage_api_common::cors::*;
-use garage_api_common::generic_server::*;
-use garage_api_common::helpers::*;
-use garage_api_common::signature::verify_request;
-use crate::batch::*;
-use crate::error::*;
-use crate::index::*;
-use crate::item::*;
-use crate::router::Endpoint;
-pub use garage_api_common::signature::streaming::ReqBody;
+use crate::generic_server::*;
+use crate::k2v::error::*;
+use crate::signature::verify_request;
+use crate::helpers::*;
+use crate::k2v::batch::*;
+use crate::k2v::index::*;
+use crate::k2v::item::*;
+use crate::k2v::router::Endpoint;
+use crate::s3::cors::*;
+pub use crate::signature::streaming::ReqBody;
pub type ResBody = BoxBody<Error>;
pub struct K2VApiServer {
garage: Arc<Garage>,
}
-pub struct K2VApiEndpoint {
+pub(crate) struct K2VApiEndpoint {
bucket_name: String,
endpoint: Endpoint,
}
@@ -46,6 +49,7 @@ impl K2VApiServer {
}
}
+#[async_trait]
impl ApiHandler for K2VApiServer {
const API_NAME: &'static str = "k2v";
const API_NAME_DISPLAY: &'static str = "K2V";
@@ -73,7 +77,7 @@ impl ApiHandler for K2VApiServer {
} = endpoint;
let garage = self.garage.clone();
-// The OPTIONS method is processed early, before we even check for an API key
+// The OPTIONS method is procesed early, before we even check for an API key
if let Endpoint::Options = endpoint {
let options_res = handle_options_api(garage, &req, Some(bucket_name))
.await
@@ -86,13 +90,11 @@ impl ApiHandler for K2VApiServer {
let bucket_id = garage
.bucket_helper()
.resolve_bucket(&bucket_name, &api_key)
-.await
-.map_err(pass_helper_error)?;
+.await?;
let bucket = garage
.bucket_helper()
.get_existing_bucket(bucket_id)
-.await
-.map_err(helper_error_as_internal)?;
+.await?;
let bucket_params = bucket.state.into_option().unwrap();
let allowed = match endpoint.authorization_type() {
View File
@@ -4,14 +4,13 @@ use serde::{Deserialize, Serialize};
use garage_table::{EnumerationOrder, TableSchema};
+use garage_model::k2v::causality::*;
use garage_model::k2v::item_table::*;
-use garage_api_common::helpers::*;
-use crate::api_server::{ReqBody, ResBody};
-use crate::error::*;
-use crate::item::parse_causality_token;
-use crate::range::read_range;
+use crate::helpers::*;
+use crate::k2v::api_server::{ReqBody, ResBody};
+use crate::k2v::error::*;
+use crate::k2v::range::read_range;
pub async fn handle_insert_batch(
ctx: ReqCtx,
@@ -24,7 +23,7 @@ pub async fn handle_insert_batch(
let mut items2 = vec![];
for it in items {
-let ct = it.ct.map(|s| parse_causality_token(&s)).transpose()?;
+let ct = it.ct.map(|s| CausalContext::parse_helper(&s)).transpose()?;
let v = match it.v {
Some(vs) => DvvsValue::Value(
BASE64_STANDARD
@@ -282,8 +281,7 @@ pub(crate) async fn handle_poll_range(
query.seen_marker,
timeout_msec,
)
-.await
-.map_err(pass_helper_error)?;
+.await?;
if let Some((items, seen_marker)) = resp {
let resp = PollRangeResponse {
View File
@@ -2,21 +2,18 @@ use err_derive::Error;
use hyper::header::HeaderValue;
use hyper::{HeaderMap, StatusCode};
-use garage_api_common::common_error::{commonErrorDerivative, CommonError};
-pub(crate) use garage_api_common::common_error::{helper_error_as_internal, pass_helper_error};
-pub use garage_api_common::common_error::{
-CommonErrorDerivative, OkOrBadRequest, OkOrInternalError,
-};
-use garage_api_common::generic_server::ApiError;
-use garage_api_common::helpers::*;
-use garage_api_common::signature::error::Error as SignatureError;
+use crate::common_error::CommonError;
+pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
+use crate::generic_server::ApiError;
+use crate::helpers::*;
+use crate::signature::error::Error as SignatureError;
/// Errors of this crate
#[derive(Debug, Error)]
pub enum Error {
#[error(display = "{}", _0)]
/// Error from common error
-Common(#[error(source)] CommonError),
+Common(CommonError),
// Category: cannot process
/// Authorization Header Malformed
@@ -31,10 +28,6 @@ pub enum Error {
#[error(display = "Invalid base64: {}", _0)]
InvalidBase64(#[error(source)] base64::DecodeError),
-/// Invalid causality token
-#[error(display = "Invalid causality token")]
-InvalidCausalityToken,
/// The client asked for an invalid return format (invalid Accept header)
#[error(display = "Not acceptable: {}", _0)]
NotAcceptable(String),
@@ -44,7 +37,16 @@ pub enum Error {
InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
}
-commonErrorDerivative!(Error);
+impl<T> From<T> for Error
+where
+CommonError: From<T>,
+{
+fn from(err: T) -> Self {
+Error::Common(CommonError::from(err))
+}
+}
+impl CommonErrorDerivative for Error {}
impl From<SignatureError> for Error {
fn from(err: SignatureError) -> Self {
@@ -70,7 +72,6 @@ impl Error {
Error::AuthorizationHeaderMalformed(_) => "AuthorizationHeaderMalformed",
Error::InvalidBase64(_) => "InvalidBase64",
Error::InvalidUtf8Str(_) => "InvalidUtf8String",
-Error::InvalidCausalityToken => "CausalityToken",
}
}
}
@@ -84,8 +85,7 @@ impl ApiError for Error {
Error::NotAcceptable(_) => StatusCode::NOT_ACCEPTABLE,
Error::AuthorizationHeaderMalformed(_)
| Error::InvalidBase64(_)
-| Error::InvalidUtf8Str(_)
-| Error::InvalidCausalityToken => StatusCode::BAD_REQUEST,
+| Error::InvalidUtf8Str(_) => StatusCode::BAD_REQUEST,
}
}
View File
@@ -1,15 +1,17 @@
+use std::sync::Arc;
use hyper::Response;
use serde::Serialize;
+use garage_rpc::ring::Ring;
use garage_table::util::*;
use garage_model::k2v::item_table::{BYTES, CONFLICTS, ENTRIES, VALUES};
-use garage_api_common::helpers::*;
-use crate::api_server::ResBody;
-use crate::error::*;
-use crate::range::read_range;
+use crate::helpers::*;
+use crate::k2v::api_server::ResBody;
+use crate::k2v::error::*;
+use crate::k2v::range::read_range;
pub async fn handle_read_index(
ctx: ReqCtx,
@@ -25,11 +27,7 @@ pub async fn handle_read_index(
let reverse = reverse.unwrap_or(false);
-let node_id_vec = garage
-.system
-.cluster_layout()
-.all_nongateway_nodes()
-.to_vec();
+let ring: Arc<Ring> = garage.system.ring.borrow().clone();
let (partition_keys, more, next_start) = read_range(
&garage.k2v.counter_table.table,
@@ -38,7 +36,7 @@ pub async fn handle_read_index(
&start,
&end,
limit,
-Some((DeletedFilter::NotDeleted, node_id_vec)),
+Some((DeletedFilter::NotDeleted, ring.layout.node_id_vec.clone())),
EnumerationOrder::from_reverse(reverse),
)
.await?;
@@ -57,7 +55,7 @@ pub async fn handle_read_index(
partition_keys: partition_keys
.into_iter()
.map(|part| {
-let vals = part.filtered_values(&garage.system.cluster_layout());
+let vals = part.filtered_values(&ring);
ReadIndexResponseEntry {
pk: part.sk,
entries: *vals.get(&s_entries).unwrap_or(&0),

View File

@ -6,10 +6,9 @@ use hyper::{Request, Response, StatusCode};
use garage_model::k2v::causality::*; use garage_model::k2v::causality::*;
use garage_model::k2v::item_table::*; use garage_model::k2v::item_table::*;
use garage_api_common::helpers::*; use crate::helpers::*;
use crate::k2v::api_server::{ReqBody, ResBody};
use crate::api_server::{ReqBody, ResBody}; use crate::k2v::error::*;
use crate::error::*;
pub const X_GARAGE_CAUSALITY_TOKEN: &str = "X-Garage-Causality-Token"; pub const X_GARAGE_CAUSALITY_TOKEN: &str = "X-Garage-Causality-Token";
@ -19,10 +18,6 @@ pub enum ReturnFormat {
Either, Either,
} }
pub(crate) fn parse_causality_token(s: &str) -> Result<CausalContext, Error> {
CausalContext::parse(s).ok_or(Error::InvalidCausalityToken)
}
impl ReturnFormat { impl ReturnFormat {
pub fn from(req: &Request<ReqBody>) -> Result<Self, Error> { pub fn from(req: &Request<ReqBody>) -> Result<Self, Error> {
let accept = match req.headers().get(header::ACCEPT) { let accept = match req.headers().get(header::ACCEPT) {
@ -141,7 +136,7 @@ pub async fn handle_insert_item(
.get(X_GARAGE_CAUSALITY_TOKEN) .get(X_GARAGE_CAUSALITY_TOKEN)
.map(|s| s.to_str()) .map(|s| s.to_str())
.transpose()? .transpose()?
.map(parse_causality_token) .map(CausalContext::parse_helper)
.transpose()?; .transpose()?;
let body = http_body_util::BodyExt::collect(req.into_body()) let body = http_body_util::BodyExt::collect(req.into_body())
@ -181,7 +176,7 @@ pub async fn handle_delete_item(
.get(X_GARAGE_CAUSALITY_TOKEN) .get(X_GARAGE_CAUSALITY_TOKEN)
.map(|s| s.to_str()) .map(|s| s.to_str())
.transpose()? .transpose()?
.map(parse_causality_token) .map(CausalContext::parse_helper)
.transpose()?; .transpose()?;
let value = DvvsValue::Deleted; let value = DvvsValue::Deleted;

View File

@ -1,6 +1,3 @@
#[macro_use]
extern crate tracing;
pub mod api_server; pub mod api_server;
mod error; mod error;
mod router; mod router;

View File

@ -7,9 +7,8 @@ use std::sync::Arc;
use garage_table::replication::TableShardedReplication; use garage_table::replication::TableShardedReplication;
use garage_table::*; use garage_table::*;
use garage_api_common::helpers::key_after_prefix; use crate::helpers::key_after_prefix;
use crate::k2v::error::*;
use crate::error::*;
/// Read range in a Garage table. /// Read range in a Garage table.
/// Returns (entries, more?, nextStart) /// Returns (entries, more?, nextStart)

View File

@ -1,11 +1,11 @@
use crate::error::*; use crate::k2v::error::*;
use std::borrow::Cow; use std::borrow::Cow;
use hyper::{Method, Request}; use hyper::{Method, Request};
use garage_api_common::helpers::Authorization; use crate::helpers::Authorization;
use garage_api_common::router_macros::{generateQueryParameters, router_match}; use crate::router_macros::{generateQueryParameters, router_match};
router_match! {@func router_match! {@func

17
src/api/lib.rs Normal file
View File

@ -0,0 +1,17 @@
//! Crate for serving an S3 compatible API
#[macro_use]
extern crate tracing;
pub mod common_error;
mod encoding;
pub mod generic_server;
pub mod helpers;
mod router_macros;
/// This module is public only to help testing. Don't expect stability here
pub mod signature;
pub mod admin;
#[cfg(feature = "k2v")]
pub mod k2v;
pub mod s3;

View File

@ -1,6 +1,5 @@
/// This macro is used to generate very repetitive match {} blocks in this module /// This macro is used to generate very repetitive match {} blocks in this module
/// It is _not_ made to be used anywhere else /// It is _not_ made to be used anywhere else
#[macro_export]
macro_rules! router_match { macro_rules! router_match {
(@match $enum:expr , [ $($endpoint:ident,)* ]) => {{ (@match $enum:expr , [ $($endpoint:ident,)* ]) => {{
// usage: router_match {@match my_enum, [ VariantWithField1, VariantWithField2 ..] } // usage: router_match {@match my_enum, [ VariantWithField1, VariantWithField2 ..] }
@ -134,7 +133,6 @@ macro_rules! router_match {
/// This macro is used to generate part of the code in this module. It must be called only once, and /// This macro is used to generate part of the code in this module. It must be called only once, and
/// is useless outside of this module. /// is useless outside of this module.
#[macro_export]
macro_rules! generateQueryParameters { macro_rules! generateQueryParameters {
( (
keywords: [ $($kw_param:expr => $kw_name: ident),* ], keywords: [ $($kw_param:expr => $kw_name: ident),* ],
@ -206,7 +204,7 @@ macro_rules! generateQueryParameters {
} }
/// Get an error message in case not all parameters were used when extracting them to /// Get an error message in case not all parameters were used when extracting them to
/// build an Endpoint variant /// build an Endpoint variant
fn nonempty_message(&self) -> Option<&str> { fn nonempty_message(&self) -> Option<&str> {
if self.keyword.is_some() { if self.keyword.is_some() {
Some("Keyword not used") Some("Keyword not used")
@ -222,5 +220,5 @@ macro_rules! generateQueryParameters {
} }
} }
pub use generateQueryParameters; pub(crate) use generateQueryParameters;
pub use router_match; pub(crate) use router_match;

View File

@ -1,5 +1,7 @@
use std::sync::Arc; use std::sync::Arc;
use async_trait::async_trait;
use hyper::header; use hyper::header;
use hyper::{body::Incoming as IncomingBody, Request, Response}; use hyper::{body::Incoming as IncomingBody, Request, Response};
use tokio::sync::watch; use tokio::sync::watch;
@ -12,33 +14,33 @@ use garage_util::socket_address::UnixOrTCPSocketAddress;
use garage_model::garage::Garage; use garage_model::garage::Garage;
use garage_model::key_table::Key; use garage_model::key_table::Key;
use garage_api_common::cors::*; use crate::generic_server::*;
use garage_api_common::generic_server::*; use crate::s3::error::*;
use garage_api_common::helpers::*;
use garage_api_common::signature::verify_request;
use crate::bucket::*; use crate::signature::verify_request;
use crate::copy::*;
use crate::cors::*;
use crate::delete::*;
use crate::error::*;
use crate::get::*;
use crate::lifecycle::*;
use crate::list::*;
use crate::multipart::*;
use crate::post_object::handle_post_object;
use crate::put::*;
use crate::router::Endpoint;
use crate::website::*;
pub use garage_api_common::signature::streaming::ReqBody; use crate::helpers::*;
use crate::s3::bucket::*;
use crate::s3::copy::*;
use crate::s3::cors::*;
use crate::s3::delete::*;
use crate::s3::get::*;
use crate::s3::lifecycle::*;
use crate::s3::list::*;
use crate::s3::multipart::*;
use crate::s3::post_object::handle_post_object;
use crate::s3::put::*;
use crate::s3::router::Endpoint;
use crate::s3::website::*;
pub use crate::signature::streaming::ReqBody;
pub type ResBody = BoxBody<Error>; pub type ResBody = BoxBody<Error>;
pub struct S3ApiServer { pub struct S3ApiServer {
garage: Arc<Garage>, garage: Arc<Garage>,
} }
pub struct S3ApiEndpoint { pub(crate) struct S3ApiEndpoint {
bucket_name: Option<String>, bucket_name: Option<String>,
endpoint: Endpoint, endpoint: Endpoint,
} }
@ -68,6 +70,7 @@ impl S3ApiServer {
} }
} }
#[async_trait]
impl ApiHandler for S3ApiServer { impl ApiHandler for S3ApiServer {
const API_NAME: &'static str = "s3"; const API_NAME: &'static str = "s3";
const API_NAME_DISPLAY: &'static str = "S3"; const API_NAME_DISPLAY: &'static str = "S3";
@ -147,8 +150,7 @@ impl ApiHandler for S3ApiServer {
let bucket_id = garage let bucket_id = garage
.bucket_helper() .bucket_helper()
.resolve_bucket(&bucket_name, &api_key) .resolve_bucket(&bucket_name, &api_key)
.await .await?;
.map_err(pass_helper_error)?;
let bucket = garage let bucket = garage
.bucket_helper() .bucket_helper()
.get_existing_bucket(bucket_id) .get_existing_bucket(bucket_id)
@ -323,7 +325,7 @@ impl ApiHandler for S3ApiServer {
part_number_marker: part_number_marker.map(|p| p.min(10000)), part_number_marker: part_number_marker.map(|p| p.min(10000)),
max_parts: max_parts.unwrap_or(1000).clamp(1, 1000), max_parts: max_parts.unwrap_or(1000).clamp(1, 1000),
}; };
handle_list_parts(ctx, req, &query).await handle_list_parts(ctx, &query).await
} }
Endpoint::DeleteObjects {} => handle_delete_objects(ctx, req, content_sha256).await, Endpoint::DeleteObjects {} => handle_delete_objects(ctx, req, content_sha256).await,
Endpoint::GetBucketWebsite {} => handle_get_website(ctx).await, Endpoint::GetBucketWebsite {} => handle_get_website(ctx).await,

View File

@ -13,13 +13,12 @@ use garage_util::crdt::*;
use garage_util::data::*; use garage_util::data::*;
use garage_util::time::*; use garage_util::time::*;
use garage_api_common::common_error::CommonError; use crate::common_error::CommonError;
use garage_api_common::helpers::*; use crate::helpers::*;
use garage_api_common::signature::verify_signed_content; use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;
use crate::api_server::{ReqBody, ResBody}; use crate::s3::xml as s3_xml;
use crate::error::*; use crate::signature::verify_signed_content;
use crate::xml as s3_xml;
pub fn handle_get_bucket_location(ctx: ReqCtx) -> Result<Response<ResBody>, Error> { pub fn handle_get_bucket_location(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
let ReqCtx { garage, .. } = ctx; let ReqCtx { garage, .. } = ctx;

View File

@ -1,406 +0,0 @@
use std::convert::{TryFrom, TryInto};
use std::hash::Hasher;
use base64::prelude::*;
use crc32c::Crc32cHasher as Crc32c;
use crc32fast::Hasher as Crc32;
use md5::{Digest, Md5};
use sha1::Sha1;
use sha2::Sha256;
use http::{HeaderMap, HeaderName, HeaderValue};
use garage_util::data::*;
use garage_util::error::OkOrMessage;
use garage_model::s3::object_table::*;
use crate::error::*;
pub const X_AMZ_CHECKSUM_ALGORITHM: HeaderName =
HeaderName::from_static("x-amz-checksum-algorithm");
pub const X_AMZ_CHECKSUM_MODE: HeaderName = HeaderName::from_static("x-amz-checksum-mode");
pub const X_AMZ_CHECKSUM_CRC32: HeaderName = HeaderName::from_static("x-amz-checksum-crc32");
pub const X_AMZ_CHECKSUM_CRC32C: HeaderName = HeaderName::from_static("x-amz-checksum-crc32c");
pub const X_AMZ_CHECKSUM_SHA1: HeaderName = HeaderName::from_static("x-amz-checksum-sha1");
pub const X_AMZ_CHECKSUM_SHA256: HeaderName = HeaderName::from_static("x-amz-checksum-sha256");
pub type Crc32Checksum = [u8; 4];
pub type Crc32cChecksum = [u8; 4];
pub type Md5Checksum = [u8; 16];
pub type Sha1Checksum = [u8; 20];
pub type Sha256Checksum = [u8; 32];
#[derive(Debug, Default)]
pub(crate) struct ExpectedChecksums {
// base64-encoded md5 (content-md5 header)
pub md5: Option<String>,
// content_sha256 (as a Hash / FixedBytes32)
pub sha256: Option<Hash>,
// extra x-amz-checksum-* header
pub extra: Option<ChecksumValue>,
}
pub(crate) struct Checksummer {
pub crc32: Option<Crc32>,
pub crc32c: Option<Crc32c>,
pub md5: Option<Md5>,
pub sha1: Option<Sha1>,
pub sha256: Option<Sha256>,
}
#[derive(Default)]
pub(crate) struct Checksums {
pub crc32: Option<Crc32Checksum>,
pub crc32c: Option<Crc32cChecksum>,
pub md5: Option<Md5Checksum>,
pub sha1: Option<Sha1Checksum>,
pub sha256: Option<Sha256Checksum>,
}
impl Checksummer {
pub(crate) fn init(expected: &ExpectedChecksums, require_md5: bool) -> Self {
let mut ret = Self {
crc32: None,
crc32c: None,
md5: None,
sha1: None,
sha256: None,
};
if expected.md5.is_some() || require_md5 {
ret.md5 = Some(Md5::new());
}
if expected.sha256.is_some() || matches!(&expected.extra, Some(ChecksumValue::Sha256(_))) {
ret.sha256 = Some(Sha256::new());
}
if matches!(&expected.extra, Some(ChecksumValue::Crc32(_))) {
ret.crc32 = Some(Crc32::new());
}
if matches!(&expected.extra, Some(ChecksumValue::Crc32c(_))) {
ret.crc32c = Some(Crc32c::default());
}
if matches!(&expected.extra, Some(ChecksumValue::Sha1(_))) {
ret.sha1 = Some(Sha1::new());
}
ret
}
pub(crate) fn add(mut self, algo: Option<ChecksumAlgorithm>) -> Self {
match algo {
Some(ChecksumAlgorithm::Crc32) => {
self.crc32 = Some(Crc32::new());
}
Some(ChecksumAlgorithm::Crc32c) => {
self.crc32c = Some(Crc32c::default());
}
Some(ChecksumAlgorithm::Sha1) => {
self.sha1 = Some(Sha1::new());
}
Some(ChecksumAlgorithm::Sha256) => {
self.sha256 = Some(Sha256::new());
}
None => (),
}
self
}
pub(crate) fn update(&mut self, bytes: &[u8]) {
if let Some(crc32) = &mut self.crc32 {
crc32.update(bytes);
}
if let Some(crc32c) = &mut self.crc32c {
crc32c.write(bytes);
}
if let Some(md5) = &mut self.md5 {
md5.update(bytes);
}
if let Some(sha1) = &mut self.sha1 {
sha1.update(bytes);
}
if let Some(sha256) = &mut self.sha256 {
sha256.update(bytes);
}
}
pub(crate) fn finalize(self) -> Checksums {
Checksums {
crc32: self.crc32.map(|x| u32::to_be_bytes(x.finalize())),
crc32c: self
.crc32c
.map(|x| u32::to_be_bytes(u32::try_from(x.finish()).unwrap())),
md5: self.md5.map(|x| x.finalize()[..].try_into().unwrap()),
sha1: self.sha1.map(|x| x.finalize()[..].try_into().unwrap()),
sha256: self.sha256.map(|x| x.finalize()[..].try_into().unwrap()),
}
}
}
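The calling convention for Checksummer is init (or add) to select algorithms, update once per body chunk, then finalize. A standalone sketch of the same incremental flow using the underlying crates directly (crc32fast, md-5, sha2); hash_chunks is a hypothetical helper, not Garage API:

use md5::{Digest, Md5};
use sha2::Sha256;

// Feed every chunk to each active hasher, then finalize them all at once;
// this is the pattern Checksummer::update/finalize wrap for optional hashers.
fn hash_chunks(chunks: &[&[u8]]) -> ([u8; 4], [u8; 16], [u8; 32]) {
    let mut crc32 = crc32fast::Hasher::new();
    let mut md5 = Md5::new();
    let mut sha256 = Sha256::new();
    for chunk in chunks {
        crc32.update(chunk);
        md5.update(chunk);
        sha256.update(chunk);
    }
    (
        u32::to_be_bytes(crc32.finalize()),
        md5.finalize()[..].try_into().unwrap(),
        sha256.finalize()[..].try_into().unwrap(),
    )
}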
impl Checksums {
pub fn verify(&self, expected: &ExpectedChecksums) -> Result<(), Error> {
if let Some(expected_md5) = &expected.md5 {
match self.md5 {
Some(md5) if BASE64_STANDARD.encode(&md5) == expected_md5.trim_matches('"') => (),
_ => {
return Err(Error::InvalidDigest(
"MD5 checksum verification failed (from content-md5)".into(),
))
}
}
}
if let Some(expected_sha256) = &expected.sha256 {
match self.sha256 {
Some(sha256) if &sha256[..] == expected_sha256.as_slice() => (),
_ => {
return Err(Error::InvalidDigest(
"SHA256 checksum verification failed (from x-amz-content-sha256)".into(),
))
}
}
}
if let Some(extra) = expected.extra {
let algo = extra.algorithm();
if self.extract(Some(algo)) != Some(extra) {
return Err(Error::InvalidDigest(format!(
"Failed to validate checksum for algorithm {:?}",
algo
)));
}
}
Ok(())
}
pub fn extract(&self, algo: Option<ChecksumAlgorithm>) -> Option<ChecksumValue> {
match algo {
None => None,
Some(ChecksumAlgorithm::Crc32) => Some(ChecksumValue::Crc32(self.crc32.unwrap())),
Some(ChecksumAlgorithm::Crc32c) => Some(ChecksumValue::Crc32c(self.crc32c.unwrap())),
Some(ChecksumAlgorithm::Sha1) => Some(ChecksumValue::Sha1(self.sha1.unwrap())),
Some(ChecksumAlgorithm::Sha256) => Some(ChecksumValue::Sha256(self.sha256.unwrap())),
}
}
}
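Zooming in on the first branch of verify(): content-md5 carries a base64-encoded digest that clients sometimes wrap in double quotes, hence the trim_matches('"') before comparing. A standalone sketch (content_md5_matches is a hypothetical helper):

use base64::prelude::*;
use md5::{Digest, Md5};

// Mirrors the content-md5 comparison above: hash, base64-encode, strip quotes.
fn content_md5_matches(body: &[u8], header_value: &str) -> bool {
    let digest = Md5::new_with_prefix(body).finalize();
    BASE64_STANDARD.encode(digest) == header_value.trim_matches('"')
}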
// ----
#[derive(Default)]
pub(crate) struct MultipartChecksummer {
pub md5: Md5,
pub extra: Option<MultipartExtraChecksummer>,
}
pub(crate) enum MultipartExtraChecksummer {
Crc32(Crc32),
Crc32c(Crc32c),
Sha1(Sha1),
Sha256(Sha256),
}
impl MultipartChecksummer {
pub(crate) fn init(algo: Option<ChecksumAlgorithm>) -> Self {
Self {
md5: Md5::new(),
extra: match algo {
None => None,
Some(ChecksumAlgorithm::Crc32) => {
Some(MultipartExtraChecksummer::Crc32(Crc32::new()))
}
Some(ChecksumAlgorithm::Crc32c) => {
Some(MultipartExtraChecksummer::Crc32c(Crc32c::default()))
}
Some(ChecksumAlgorithm::Sha1) => Some(MultipartExtraChecksummer::Sha1(Sha1::new())),
Some(ChecksumAlgorithm::Sha256) => {
Some(MultipartExtraChecksummer::Sha256(Sha256::new()))
}
},
}
}
pub(crate) fn update(
&mut self,
etag: &str,
checksum: Option<ChecksumValue>,
) -> Result<(), Error> {
self.md5
.update(&hex::decode(&etag).ok_or_message("invalid etag hex")?);
match (&mut self.extra, checksum) {
(None, _) => (),
(
Some(MultipartExtraChecksummer::Crc32(ref mut crc32)),
Some(ChecksumValue::Crc32(x)),
) => {
crc32.update(&x);
}
(
Some(MultipartExtraChecksummer::Crc32c(ref mut crc32c)),
Some(ChecksumValue::Crc32c(x)),
) => {
crc32c.write(&x);
}
(Some(MultipartExtraChecksummer::Sha1(ref mut sha1)), Some(ChecksumValue::Sha1(x))) => {
sha1.update(&x);
}
(
Some(MultipartExtraChecksummer::Sha256(ref mut sha256)),
Some(ChecksumValue::Sha256(x)),
) => {
sha256.update(&x);
}
(Some(_), b) => {
return Err(Error::internal_error(format!(
"part checksum was not computed correctly, got: {:?}",
b
)))
}
}
Ok(())
}
pub(crate) fn finalize(self) -> (Md5Checksum, Option<ChecksumValue>) {
let md5 = self.md5.finalize()[..].try_into().unwrap();
let extra = match self.extra {
None => None,
Some(MultipartExtraChecksummer::Crc32(crc32)) => {
Some(ChecksumValue::Crc32(u32::to_be_bytes(crc32.finalize())))
}
Some(MultipartExtraChecksummer::Crc32c(crc32c)) => Some(ChecksumValue::Crc32c(
u32::to_be_bytes(u32::try_from(crc32c.finish()).unwrap()),
)),
Some(MultipartExtraChecksummer::Sha1(sha1)) => {
Some(ChecksumValue::Sha1(sha1.finalize()[..].try_into().unwrap()))
}
Some(MultipartExtraChecksummer::Sha256(sha256)) => Some(ChecksumValue::Sha256(
sha256.finalize()[..].try_into().unwrap(),
)),
};
(md5, extra)
}
}
// ----
/// Extract the value of the x-amz-checksum-algorithm header
pub(crate) fn request_checksum_algorithm(
headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumAlgorithm>, Error> {
match headers.get(X_AMZ_CHECKSUM_ALGORITHM) {
None => Ok(None),
Some(x) if x == "CRC32" => Ok(Some(ChecksumAlgorithm::Crc32)),
Some(x) if x == "CRC32C" => Ok(Some(ChecksumAlgorithm::Crc32c)),
Some(x) if x == "SHA1" => Ok(Some(ChecksumAlgorithm::Sha1)),
Some(x) if x == "SHA256" => Ok(Some(ChecksumAlgorithm::Sha256)),
_ => Err(Error::bad_request("invalid checksum algorithm")),
}
}
/// Extract the value of any of the x-amz-checksum-* headers
pub(crate) fn request_checksum_value(
headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumValue>, Error> {
let mut ret = vec![];
if let Some(crc32_str) = headers.get(X_AMZ_CHECKSUM_CRC32) {
let crc32 = BASE64_STANDARD
.decode(&crc32_str)
.ok()
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-crc32 header")?;
ret.push(ChecksumValue::Crc32(crc32))
}
if let Some(crc32c_str) = headers.get(X_AMZ_CHECKSUM_CRC32C) {
let crc32c = BASE64_STANDARD
.decode(&crc32c_str)
.ok()
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-crc32c header")?;
ret.push(ChecksumValue::Crc32c(crc32c))
}
if let Some(sha1_str) = headers.get(X_AMZ_CHECKSUM_SHA1) {
let sha1 = BASE64_STANDARD
.decode(&sha1_str)
.ok()
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-sha1 header")?;
ret.push(ChecksumValue::Sha1(sha1))
}
if let Some(sha256_str) = headers.get(X_AMZ_CHECKSUM_SHA256) {
let sha256 = BASE64_STANDARD
.decode(&sha256_str)
.ok()
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-sha256 header")?;
ret.push(ChecksumValue::Sha256(sha256))
}
if ret.len() > 1 {
return Err(Error::bad_request(
"multiple x-amz-checksum-* headers given",
));
}
Ok(ret.pop())
}
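Each branch above follows the same recipe: base64-decode the header value, then require the exact digest length via a fixed-size array conversion. Isolated for CRC32 below; the helper name and sample values are illustrative only:

use base64::prelude::*;

// Decode one x-amz-checksum-crc32 value: valid base64 and exactly 4 bytes.
fn decode_crc32_header(value: &str) -> Option<[u8; 4]> {
    BASE64_STANDARD.decode(value).ok()?.try_into().ok()
}

// decode_crc32_header("AAAAAA==") == Some([0, 0, 0, 0])
// decode_crc32_header("AAAA")     == None (decodes to 3 bytes, not 4)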
/// Checks for the presence of x-amz-checksum-algorithm, /// Checks for the presence of x-amz-checksum-algorithm,
/// and if so, extracts the corresponding x-amz-checksum-* value /// and if so, extracts the corresponding x-amz-checksum-* value
pub(crate) fn request_checksum_algorithm_value(
headers: &HeaderMap<HeaderValue>,
) -> Result<Option<ChecksumValue>, Error> {
match headers.get(X_AMZ_CHECKSUM_ALGORITHM) {
Some(x) if x == "CRC32" => {
let crc32 = headers
.get(X_AMZ_CHECKSUM_CRC32)
.and_then(|x| BASE64_STANDARD.decode(&x).ok())
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-crc32 header")?;
Ok(Some(ChecksumValue::Crc32(crc32)))
}
Some(x) if x == "CRC32C" => {
let crc32c = headers
.get(X_AMZ_CHECKSUM_CRC32C)
.and_then(|x| BASE64_STANDARD.decode(&x).ok())
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-crc32c header")?;
Ok(Some(ChecksumValue::Crc32c(crc32c)))
}
Some(x) if x == "SHA1" => {
let sha1 = headers
.get(X_AMZ_CHECKSUM_SHA1)
.and_then(|x| BASE64_STANDARD.decode(&x).ok())
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-sha1 header")?;
Ok(Some(ChecksumValue::Sha1(sha1)))
}
Some(x) if x == "SHA256" => {
let sha256 = headers
.get(X_AMZ_CHECKSUM_SHA256)
.and_then(|x| BASE64_STANDARD.decode(&x).ok())
.and_then(|x| x.try_into().ok())
.ok_or_bad_request("invalid x-amz-checksum-sha256 header")?;
Ok(Some(ChecksumValue::Sha256(sha256)))
}
Some(_) => Err(Error::bad_request("invalid x-amz-checksum-algorithm")),
None => Ok(None),
}
}
pub(crate) fn add_checksum_response_headers(
checksum: &Option<ChecksumValue>,
mut resp: http::response::Builder,
) -> http::response::Builder {
match checksum {
Some(ChecksumValue::Crc32(crc32)) => {
resp = resp.header(X_AMZ_CHECKSUM_CRC32, BASE64_STANDARD.encode(&crc32));
}
Some(ChecksumValue::Crc32c(crc32c)) => {
resp = resp.header(X_AMZ_CHECKSUM_CRC32C, BASE64_STANDARD.encode(&crc32c));
}
Some(ChecksumValue::Sha1(sha1)) => {
resp = resp.header(X_AMZ_CHECKSUM_SHA1, BASE64_STANDARD.encode(&sha1));
}
Some(ChecksumValue::Sha256(sha256)) => {
resp = resp.header(X_AMZ_CHECKSUM_SHA256, BASE64_STANDARD.encode(&sha256));
}
None => (),
}
resp
}

View File

@ -1,18 +1,17 @@
use std::pin::Pin; use std::pin::Pin;
use std::time::{Duration, SystemTime, UNIX_EPOCH}; use std::time::{Duration, SystemTime, UNIX_EPOCH};
use futures::{stream, stream::Stream, StreamExt, TryStreamExt}; use futures::{stream, stream::Stream, StreamExt};
use md5::{Digest as Md5Digest, Md5};
use bytes::Bytes; use bytes::Bytes;
use hyper::{Request, Response}; use hyper::{Request, Response};
use serde::Serialize; use serde::Serialize;
use garage_net::bytes_buf::BytesBuf; use garage_net::bytes_buf::BytesBuf;
use garage_net::stream::read_stream_to_end;
use garage_rpc::rpc_helper::OrderTag; use garage_rpc::rpc_helper::OrderTag;
use garage_table::*; use garage_table::*;
use garage_util::data::*; use garage_util::data::*;
use garage_util::error::Error as GarageError;
use garage_util::time::*; use garage_util::time::*;
use garage_model::s3::block_ref_table::*; use garage_model::s3::block_ref_table::*;
@ -20,18 +19,12 @@ use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*; use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*; use garage_model::s3::version_table::*;
use garage_api_common::helpers::*; use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::api_server::{ReqBody, ResBody}; use crate::s3::error::*;
use crate::checksum::*; use crate::s3::multipart;
use crate::encryption::EncryptionParams; use crate::s3::put::get_headers;
use crate::error::*; use crate::s3::xml::{self as s3_xml, xmlns_tag};
use crate::get::full_object_byte_stream;
use crate::multipart;
use crate::put::{get_headers, save_stream, ChecksumMode, SaveStreamResult};
use crate::xml::{self as s3_xml, xmlns_tag};
// -------- CopyObject ---------
pub async fn handle_copy( pub async fn handle_copy(
ctx: ReqCtx, ctx: ReqCtx,
@ -40,160 +33,40 @@ pub async fn handle_copy(
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let copy_precondition = CopyPreconditionHeaders::parse(req)?; let copy_precondition = CopyPreconditionHeaders::parse(req)?;
let checksum_algorithm = request_checksum_algorithm(req.headers())?;
let source_object = get_copy_source(&ctx, req).await?; let source_object = get_copy_source(&ctx, req).await?;
let ReqCtx {
garage,
bucket_id: dest_bucket_id,
..
} = ctx;
let (source_version, source_version_data, source_version_meta) = let (source_version, source_version_data, source_version_meta) =
extract_source_info(&source_object)?; extract_source_info(&source_object)?;
// Check precondition, e.g. x-amz-copy-source-if-match // Check precondition, e.g. x-amz-copy-source-if-match
copy_precondition.check(source_version, &source_version_meta.etag)?; copy_precondition.check(source_version, &source_version_meta.etag)?;
// Determine encryption parameters
let (source_encryption, source_object_meta_inner) =
EncryptionParams::check_decrypt_for_copy_source(
&ctx.garage,
req.headers(),
&source_version_meta.encryption,
)?;
let dest_encryption = EncryptionParams::new_from_headers(&ctx.garage, req.headers())?;
// Extract source checksum info before source_object_meta_inner is consumed
let source_checksum = source_object_meta_inner.checksum;
let source_checksum_algorithm = source_checksum.map(|x| x.algorithm());
// If source object has a checksum, the destination object must as well.
// The x-amz-checksum-algorithm header allows to change that algorithm,
// but if it is absent, we must use the same as before
let checksum_algorithm = checksum_algorithm.or(source_checksum_algorithm);
// Determine metadata of destination object
let was_multipart = source_version_meta.etag.contains('-');
let dest_object_meta = ObjectVersionMetaInner {
headers: match req.headers().get("x-amz-metadata-directive") {
Some(v) if v == hyper::header::HeaderValue::from_static("REPLACE") => {
get_headers(req.headers())?
}
_ => source_object_meta_inner.into_owned().headers,
},
checksum: source_checksum,
};
// Do actual object copying
//
// In any of the following scenarios, we need to read the whole object
// data and re-write it again:
//
// - the data needs to be decrypted or encrypted
// - the requested checksum algorithm requires us to recompute a checksum
// - the original object was a multipart upload and a checksum algorithm
// is defined (AWS specifies that in this case, we must recompute the
// checksum from scratch as if this was a single big object and not
// a multipart object, as the checksums are not computed in the same way)
//
// In other cases, we can just copy the metadata and reference the same blocks.
//
// See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
let must_recopy = !EncryptionParams::is_same(&source_encryption, &dest_encryption)
|| source_checksum_algorithm != checksum_algorithm
|| (was_multipart && checksum_algorithm.is_some());
let res = if !must_recopy {
// In most cases, we can just copy the metadata and link blocks of the
// old object from the new object.
handle_copy_metaonly(
ctx,
dest_key,
dest_object_meta,
dest_encryption,
source_version,
source_version_data,
source_version_meta,
)
.await?
} else {
let expected_checksum = ExpectedChecksums {
md5: None,
sha256: None,
extra: source_checksum,
};
let checksum_mode = if was_multipart || source_checksum_algorithm != checksum_algorithm {
ChecksumMode::Calculate(checksum_algorithm)
} else {
ChecksumMode::Verify(&expected_checksum)
};
// If source and dest encryption use different keys,
// we must decrypt content and re-encrypt, so rewrite all data blocks.
handle_copy_reencrypt(
ctx,
dest_key,
dest_object_meta,
dest_encryption,
source_version,
source_version_data,
source_encryption,
checksum_mode,
)
.await?
};
let last_modified = msec_to_rfc3339(res.version_timestamp);
let result = CopyObjectResult {
last_modified: s3_xml::Value(last_modified),
etag: s3_xml::Value(format!("\"{}\"", res.etag)),
};
let xml = s3_xml::to_xml_with_header(&result)?;
let mut resp = Response::builder()
.header("Content-Type", "application/xml")
.header("x-amz-version-id", hex::encode(res.version_uuid))
.header(
"x-amz-copy-source-version-id",
hex::encode(source_version.uuid),
);
dest_encryption.add_response_headers(&mut resp);
Ok(resp.body(string_body(xml))?)
}
async fn handle_copy_metaonly(
ctx: ReqCtx,
dest_key: &str,
dest_object_meta: ObjectVersionMetaInner,
dest_encryption: EncryptionParams,
source_version: &ObjectVersion,
source_version_data: &ObjectVersionData,
source_version_meta: &ObjectVersionMeta,
) -> Result<SaveStreamResult, Error> {
let ReqCtx {
garage,
bucket_id: dest_bucket_id,
..
} = ctx;
// Generate parameters for copied object // Generate parameters for copied object
let new_uuid = gen_uuid(); let new_uuid = gen_uuid();
let new_timestamp = now_msec(); let new_timestamp = now_msec();
let new_meta = ObjectVersionMeta { // Implement x-amz-metadata-directive: REPLACE
encryption: dest_encryption.encrypt_meta(dest_object_meta)?, let new_meta = match req.headers().get("x-amz-metadata-directive") {
Some(v) if v == hyper::header::HeaderValue::from_static("REPLACE") => ObjectVersionMeta {
headers: get_headers(req.headers())?,
size: source_version_meta.size, size: source_version_meta.size,
etag: source_version_meta.etag.clone(), etag: source_version_meta.etag.clone(),
},
_ => source_version_meta.clone(),
}; };
let res = SaveStreamResult { let etag = new_meta.etag.to_string();
version_uuid: new_uuid,
version_timestamp: new_timestamp,
etag: new_meta.etag.clone(),
};
// Save object copy // Save object copy
match source_version_data { match source_version_data {
ObjectVersionData::DeleteMarker => unreachable!(), ObjectVersionData::DeleteMarker => unreachable!(),
ObjectVersionData::Inline(_meta, bytes) => { ObjectVersionData::Inline(_meta, bytes) => {
// bytes is either plaintext before&after or encrypted with the
// same keys, so it's ok to just copy it as is
let dest_object_version = ObjectVersion { let dest_object_version = ObjectVersion {
uuid: new_uuid, uuid: new_uuid,
timestamp: new_timestamp, timestamp: new_timestamp,
@ -224,8 +97,7 @@ async fn handle_copy_metaonly(
uuid: new_uuid, uuid: new_uuid,
timestamp: new_timestamp, timestamp: new_timestamp,
state: ObjectVersionState::Uploading { state: ObjectVersionState::Uploading {
encryption: new_meta.encryption.clone(), headers: new_meta.headers.clone(),
checksum_algorithm: None,
multipart: false, multipart: false,
}, },
}; };
@ -292,42 +164,23 @@ async fn handle_copy_metaonly(
} }
} }
Ok(res) let last_modified = msec_to_rfc3339(new_timestamp);
} let result = CopyObjectResult {
last_modified: s3_xml::Value(last_modified),
etag: s3_xml::Value(format!("\"{}\"", etag)),
};
let xml = s3_xml::to_xml_with_header(&result)?;
async fn handle_copy_reencrypt( Ok(Response::builder()
ctx: ReqCtx, .header("Content-Type", "application/xml")
dest_key: &str, .header("x-amz-version-id", hex::encode(new_uuid))
dest_object_meta: ObjectVersionMetaInner, .header(
dest_encryption: EncryptionParams, "x-amz-copy-source-version-id",
source_version: &ObjectVersion, hex::encode(source_version.uuid),
source_version_data: &ObjectVersionData,
source_encryption: EncryptionParams,
checksum_mode: ChecksumMode<'_>,
) -> Result<SaveStreamResult, Error> {
// basically we will read the source data (decrypt if necessary)
// and save that in a new object (encrypt if necessary),
// by combining the code used in getobject and putobject
let source_stream = full_object_byte_stream(
ctx.garage.clone(),
source_version,
source_version_data,
source_encryption,
);
save_stream(
&ctx,
dest_object_meta,
dest_encryption,
source_stream.map_err(|e| Error::from(GarageError::from(e))),
&dest_key.to_string(),
checksum_mode,
) )
.await .body(string_body(xml))?)
} }
// -------- UploadPartCopy ---------
pub async fn handle_upload_part_copy( pub async fn handle_upload_part_copy(
ctx: ReqCtx, ctx: ReqCtx,
req: &Request<ReqBody>, req: &Request<ReqBody>,
@ -340,7 +193,7 @@ pub async fn handle_upload_part_copy(
let dest_upload_id = multipart::decode_upload_id(upload_id)?; let dest_upload_id = multipart::decode_upload_id(upload_id)?;
let dest_key = dest_key.to_string(); let dest_key = dest_key.to_string();
let (source_object, (_, dest_version, mut dest_mpu)) = futures::try_join!( let (source_object, (_, _, mut dest_mpu)) = futures::try_join!(
get_copy_source(&ctx, req), get_copy_source(&ctx, req),
multipart::get_upload(&ctx, &dest_key, &dest_upload_id) multipart::get_upload(&ctx, &dest_key, &dest_upload_id)
)?; )?;
@ -353,24 +206,6 @@ pub async fn handle_upload_part_copy(
// Check precondition on source, e.g. x-amz-copy-source-if-match // Check precondition on source, e.g. x-amz-copy-source-if-match
copy_precondition.check(source_object_version, &source_version_meta.etag)?; copy_precondition.check(source_object_version, &source_version_meta.etag)?;
// Determine encryption parameters
let (source_encryption, _) = EncryptionParams::check_decrypt_for_copy_source(
&garage,
req.headers(),
&source_version_meta.encryption,
)?;
let (dest_object_encryption, dest_object_checksum_algorithm) = match dest_version.state {
ObjectVersionState::Uploading {
encryption,
checksum_algorithm,
..
} => (encryption, checksum_algorithm),
_ => unreachable!(),
};
let (dest_encryption, _) =
EncryptionParams::check_decrypt(&garage, req.headers(), &dest_object_encryption)?;
let same_encryption = EncryptionParams::is_same(&source_encryption, &dest_encryption);
// Check source range is valid // Check source range is valid
let source_range = match req.headers().get("x-amz-copy-source-range") { let source_range = match req.headers().get("x-amz-copy-source-range") {
Some(range) => { Some(range) => {
@ -392,7 +227,9 @@ pub async fn handle_upload_part_copy(
}; };
// Check source version is not inlined // Check source version is not inlined
if matches!(source_version_data, ObjectVersionData::Inline(_, _)) { match source_version_data {
ObjectVersionData::DeleteMarker => unreachable!(),
ObjectVersionData::Inline(_meta, _bytes) => {
// This is only for small files, we don't bother handling this. // This is only for small files, we don't bother handling this.
// (in AWS UploadPartCopy works for parts at least 5MB which // (in AWS UploadPartCopy works for parts at least 5MB which
// is never the case of an inline object) // is never the case of an inline object)
@ -400,8 +237,11 @@ pub async fn handle_upload_part_copy(
"Source object is too small (minimum part size is 5Mb)", "Source object is too small (minimum part size is 5Mb)",
)); ));
} }
ObjectVersionData::FirstBlock(_meta, _first_block_hash) => (),
};
// Fetch source version with its block list // Fetch source version with its block list,
// and destination version to check part hasn't yet been uploaded
let source_version = garage let source_version = garage
.version_table .version_table
.get(&source_object_version.uuid, &EmptyKey) .get(&source_object_version.uuid, &EmptyKey)
@ -411,9 +251,7 @@ pub async fn handle_upload_part_copy(
// We want to reuse blocks from the source version as much as possible. // We want to reuse blocks from the source version as much as possible.
// However, we still need to get the data from these blocks // However, we still need to get the data from these blocks
// because we need to know it to calculate the MD5sum of the part // because we need to know it to calculate the MD5sum of the part
// which is used as its ETag. For encrypted sources or destinations, // which is used as its ETag.
// we must always read(+decrypt) and then write(+encrypt), so we
// can never reuse data blocks as is.
// First, calculate what blocks we want to keep, // First, calculate what blocks we want to keep,
// and the subrange of the block to take, if the bounds of the // and the subrange of the block to take, if the bounds of the
@ -462,9 +300,7 @@ pub async fn handle_upload_part_copy(
dest_mpu_part_key, dest_mpu_part_key,
MpuPart { MpuPart {
version: dest_version_id, version: dest_version_id,
// These are all filled in later (bottom of this function)
etag: None, etag: None,
checksum: None,
size: None, size: None,
}, },
); );
@ -477,55 +313,32 @@ pub async fn handle_upload_part_copy(
}, },
false, false,
); );
// write an empty version now to be the parent of the block_ref entries
garage.version_table.insert(&dest_version).await?;
// Now, actually copy the blocks // Now, actually copy the blocks
let mut checksummer = Checksummer::init(&Default::default(), !dest_encryption.is_encrypted()) let mut md5hasher = Md5::new();
.add(dest_object_checksum_algorithm);
// First, create a stream that is able to read the source blocks // First, create a stream that is able to read the source blocks
// and extract the subrange if necessary. // and extract the subrange if necessary.
// The second returned value is an Option<Hash>, that is Some // The second returned value is an Option<Hash>, that is Some
// if and only if the block returned is a block that already existed // if and only if the block returned is a block that already existed
// in the Garage data store and can be reused as-is instead of having // in the Garage data store (thus we don't need to save it again).
// to save it again. This excludes encrypted source blocks that we had
// to decrypt.
let garage2 = garage.clone(); let garage2 = garage.clone();
let order_stream = OrderTag::stream(); let order_stream = OrderTag::stream();
let source_blocks = stream::iter(blocks_to_copy) let source_blocks = stream::iter(blocks_to_copy)
.enumerate() .enumerate()
.map(|(i, (block_hash, range_to_copy))| { .flat_map(|(i, (block_hash, range_to_copy))| {
let garage3 = garage2.clone(); let garage3 = garage2.clone();
async move { stream::once(async move {
let stream = source_encryption let data = garage3
.get_block(&garage3, &block_hash, Some(order_stream.order(i as u64))) .block_manager
.rpc_get_block(&block_hash, Some(order_stream.order(i as u64)))
.await?; .await?;
let data = read_stream_to_end(stream).await?.into_bytes();
// For each item, we return a tuple of:
// 1. the full data block (decrypted)
// 2. an Option<Hash> that indicates the hash of the block in the block store,
// only if it can be re-used as-is in the copied object
match range_to_copy { match range_to_copy {
Some(r) => { Some(r) => Ok((data.slice(r), None)),
// If we are taking a subslice of the data, we cannot reuse the block as-is None => Ok((data, Some(block_hash))),
Ok((data.slice(r), None))
}
None if same_encryption => {
// If the data is unencrypted before & after, or if we are using
// the same encryption key, we can reuse the stored block, no need
// to re-send it to storage nodes.
Ok((data, Some(block_hash)))
}
None => {
// If we are decrypting / (re)encrypting with different keys,
// we cannot reuse the block as-is
Ok((data, None))
}
}
} }
}) })
.buffered(2) })
.peekable(); .peekable();
// The defragmenter is a custom stream (defined below) that concatenates // The defragmenter is a custom stream (defined below) that concatenates
@ -533,39 +346,22 @@ pub async fn handle_upload_part_copy(
// It returns a series of (Vec<u8>, Option<Hash>). // It returns a series of (Vec<u8>, Option<Hash>).
// When it is done, it returns an empty vec. // When it is done, it returns an empty vec.
// Same as the previous iterator, the Option is Some(_) if and only if // Same as the previous iterator, the Option is Some(_) if and only if
// it's an existing block of the Garage data store that can be reused. // it's an existing block of the Garage data store.
let mut defragmenter = Defragmenter::new(garage.config.block_size, Box::pin(source_blocks)); let mut defragmenter = Defragmenter::new(garage.config.block_size, Box::pin(source_blocks));
let mut current_offset = 0; let mut current_offset = 0;
let mut next_block = defragmenter.next().await?; let mut next_block = defragmenter.next().await?;
// TODO this could be optimized similarly to read_and_put_blocks
// low priority because uploadpartcopy is rarely used
loop { loop {
let (data, existing_block_hash) = next_block; let (data, existing_block_hash) = next_block;
if data.is_empty() { if data.is_empty() {
break; break;
} }
let data_len = data.len() as u64; md5hasher.update(&data[..]);
let (checksummer_updated, (data_to_upload, final_hash)) = let must_upload = existing_block_hash.is_none();
tokio::task::spawn_blocking(move || { let final_hash = existing_block_hash.unwrap_or_else(|| blake2sum(&data[..]));
checksummer.update(&data[..]);
let tup = match existing_block_hash {
Some(hash) if same_encryption => (None, hash),
_ => {
let data_enc = dest_encryption.encrypt_block(data)?;
let hash = blake2sum(&data_enc);
(Some(data_enc), hash)
}
};
Ok::<_, Error>((checksummer, tup))
})
.await
.unwrap()?;
checksummer = checksummer_updated;
dest_version.blocks.clear(); dest_version.blocks.clear();
dest_version.blocks.put( dest_version.blocks.put(
@ -575,10 +371,10 @@ pub async fn handle_upload_part_copy(
}, },
VersionBlock { VersionBlock {
hash: final_hash, hash: final_hash,
size: data_len, size: data.len() as u64,
}, },
); );
current_offset += data_len; current_offset += data.len() as u64;
let block_ref = BlockRef { let block_ref = BlockRef {
block: final_hash, block: final_hash,
@ -586,34 +382,36 @@ pub async fn handle_upload_part_copy(
deleted: false.into(), deleted: false.into(),
}; };
let (_, _, _, next) = futures::try_join!( let garage2 = garage.clone();
let res = futures::try_join!(
// Thing 1: if the block is not exactly a block that existed before, // Thing 1: if the block is not exactly a block that existed before,
// we need to insert that data as a new block. // we need to insert that data as a new block.
async { async move {
if let Some(final_data) = data_to_upload { if must_upload {
garage garage2
.block_manager .block_manager
.rpc_put_block(final_hash, final_data, dest_encryption.is_encrypted(), None) .rpc_put_block(final_hash, data, None)
.await .await
} else { } else {
Ok(()) Ok(())
} }
}, },
async {
// Thing 2: we need to insert the block in the version // Thing 2: we need to insert the block in the version
garage.version_table.insert(&dest_version), garage.version_table.insert(&dest_version).await?;
// Thing 3: we need to add a block reference // Thing 3: we need to add a block reference
garage.block_ref_table.insert(&block_ref), garage.block_ref_table.insert(&block_ref).await
// Thing 4: we need to read the next block },
// Thing 4: we need to prefetch the next block
defragmenter.next(), defragmenter.next(),
)?; )?;
next_block = next; next_block = res.2;
} }
assert_eq!(current_offset, source_range.length); assert_eq!(current_offset, source_range.length);
let checksums = checksummer.finalize(); let data_md5sum = md5hasher.finalize();
let etag = dest_encryption.etag_from_md5(&checksums.md5); let etag = hex::encode(data_md5sum);
let checksum = checksums.extract(dest_object_checksum_algorithm);
// Put the part's ETag in the Versiontable // Put the part's ETag in the Versiontable
dest_mpu.parts.put( dest_mpu.parts.put(
@ -621,7 +419,6 @@ pub async fn handle_upload_part_copy(
MpuPart { MpuPart {
version: dest_version_id, version: dest_version_id,
etag: Some(etag.clone()), etag: Some(etag.clone()),
checksum,
size: Some(current_offset), size: Some(current_offset),
}, },
); );
@ -634,14 +431,13 @@ pub async fn handle_upload_part_copy(
last_modified: s3_xml::Value(msec_to_rfc3339(source_object_version.timestamp)), last_modified: s3_xml::Value(msec_to_rfc3339(source_object_version.timestamp)),
})?; })?;
let mut resp = Response::builder() Ok(Response::builder()
.header("Content-Type", "application/xml") .header("Content-Type", "application/xml")
.header( .header(
"x-amz-copy-source-version-id", "x-amz-copy-source-version-id",
hex::encode(source_object_version.uuid), hex::encode(source_object_version.uuid),
); )
dest_encryption.add_response_headers(&mut resp); .body(string_body(resp_xml))?)
Ok(resp.body(string_body(resp_xml))?)
} }
async fn get_copy_source(ctx: &ReqCtx, req: &Request<ReqBody>) -> Result<Object, Error> { async fn get_copy_source(ctx: &ReqCtx, req: &Request<ReqBody>) -> Result<Object, Error> {
@ -656,8 +452,7 @@ async fn get_copy_source(ctx: &ReqCtx, req: &Request<ReqBody>) -> Result<Object,
let source_bucket_id = garage let source_bucket_id = garage
.bucket_helper() .bucket_helper()
.resolve_bucket(&source_bucket.to_string(), api_key) .resolve_bucket(&source_bucket.to_string(), api_key)
.await .await?;
.map_err(pass_helper_error)?;
if !api_key.allow_read(&source_bucket_id) { if !api_key.allow_read(&source_bucket_id) {
return Err(Error::forbidden(format!( return Err(Error::forbidden(format!(
@ -863,7 +658,7 @@ pub struct CopyPartResult {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::xml::to_xml_with_header; use crate::s3::xml::to_xml_with_header;
#[test] #[test]
fn copy_object_result() -> Result<(), Error> { fn copy_object_result() -> Result<(), Error> {

View File

@ -1,21 +1,30 @@
use quick_xml::de::from_reader; use quick_xml::de::from_reader;
use std::sync::Arc;
use hyper::{header::HeaderName, Method, Request, Response, StatusCode}; use http::header::{
ACCESS_CONTROL_ALLOW_HEADERS, ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN,
ACCESS_CONTROL_EXPOSE_HEADERS, ACCESS_CONTROL_REQUEST_HEADERS, ACCESS_CONTROL_REQUEST_METHOD,
};
use hyper::{
body::Body, body::Incoming as IncomingBody, header::HeaderName, Method, Request, Response,
StatusCode,
};
use http_body_util::BodyExt; use http_body_util::BodyExt;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use garage_model::bucket_table::{Bucket, CorsRule as GarageCorsRule}; use crate::common_error::CommonError;
use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;
use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
use crate::signature::verify_signed_content;
use garage_model::bucket_table::{Bucket, BucketParams, CorsRule as GarageCorsRule};
use garage_model::garage::Garage;
use garage_util::data::*; use garage_util::data::*;
use garage_api_common::helpers::*;
use garage_api_common::signature::verify_signed_content;
use crate::api_server::{ReqBody, ResBody};
use crate::error::*;
use crate::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
pub async fn handle_get_cors(ctx: ReqCtx) -> Result<Response<ResBody>, Error> { pub async fn handle_get_cors(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
let ReqCtx { bucket_params, .. } = ctx; let ReqCtx { bucket_params, .. } = ctx;
if let Some(cors) = bucket_params.cors_config.get() { if let Some(cors) = bucket_params.cors_config.get() {
@ -90,6 +99,154 @@ pub async fn handle_put_cors(
.body(empty_body())?) .body(empty_body())?)
} }
pub async fn handle_options_api(
garage: Arc<Garage>,
req: &Request<IncomingBody>,
bucket_name: Option<String>,
) -> Result<Response<EmptyBody>, CommonError> {
// FIXME: CORS rules of buckets with local aliases are
// not taken into account.
// If the bucket name is a global bucket name,
// we try to apply the CORS rules of that bucket.
// If a user has a local bucket name that has
// the same name, its CORS rules won't be applied
// and will be shadowed by the rules of the globally
// existing bucket (but this is inevitable because
// OPTIONS calls are not authenticated).
if let Some(bn) = bucket_name {
let helper = garage.bucket_helper();
let bucket_id = helper.resolve_global_bucket_name(&bn).await?;
if let Some(id) = bucket_id {
let bucket = garage.bucket_helper().get_existing_bucket(id).await?;
let bucket_params = bucket.state.into_option().unwrap();
handle_options_for_bucket(req, &bucket_params)
} else {
// If there is a bucket name in the request, but that name
// does not correspond to a global alias for a bucket,
// then it's either a non-existing bucket or a local bucket.
// We have no way of knowing, because the request is not
// authenticated and thus we can't resolve local aliases.
// We take the permissive approach of allowing everything,
// because we don't want to prevent web apps that use
// local bucket names from making API calls.
Ok(Response::builder()
.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
.header(ACCESS_CONTROL_ALLOW_METHODS, "*")
.status(StatusCode::OK)
.body(EmptyBody::new())?)
}
} else {
// If there is no bucket name in the request,
// we are doing a ListBuckets call, which we want to allow
// for all origins.
Ok(Response::builder()
.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
.header(ACCESS_CONTROL_ALLOW_METHODS, "GET")
.status(StatusCode::OK)
.body(EmptyBody::new())?)
}
}
pub fn handle_options_for_bucket(
req: &Request<IncomingBody>,
bucket_params: &BucketParams,
) -> Result<Response<EmptyBody>, CommonError> {
let origin = req
.headers()
.get("Origin")
.ok_or_bad_request("Missing Origin header")?
.to_str()?;
let request_method = req
.headers()
.get(ACCESS_CONTROL_REQUEST_METHOD)
.ok_or_bad_request("Missing Access-Control-Request-Method header")?
.to_str()?;
let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) {
Some(h) => h.to_str()?.split(',').map(|h| h.trim()).collect::<Vec<_>>(),
None => vec![],
};
if let Some(cors_config) = bucket_params.cors_config.get() {
let matching_rule = cors_config
.iter()
.find(|rule| cors_rule_matches(rule, origin, request_method, request_headers.iter()));
if let Some(rule) = matching_rule {
let mut resp = Response::builder()
.status(StatusCode::OK)
.body(EmptyBody::new())?;
add_cors_headers(&mut resp, rule).ok_or_internal_error("Invalid CORS configuration")?;
return Ok(resp);
}
}
Err(CommonError::Forbidden(
"This CORS request is not allowed.".into(),
))
}
pub fn find_matching_cors_rule<'a>(
bucket_params: &'a BucketParams,
req: &Request<impl Body>,
) -> Result<Option<&'a GarageCorsRule>, Error> {
if let Some(cors_config) = bucket_params.cors_config.get() {
if let Some(origin) = req.headers().get("Origin") {
let origin = origin.to_str()?;
let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) {
Some(h) => h.to_str()?.split(',').map(|h| h.trim()).collect::<Vec<_>>(),
None => vec![],
};
return Ok(cors_config.iter().find(|rule| {
cors_rule_matches(rule, origin, req.method().as_ref(), request_headers.iter())
}));
}
}
Ok(None)
}
fn cors_rule_matches<'a, HI, S>(
rule: &GarageCorsRule,
origin: &'a str,
method: &'a str,
mut request_headers: HI,
) -> bool
where
HI: Iterator<Item = S>,
S: AsRef<str>,
{
rule.allow_origins.iter().any(|x| x == "*" || x == origin)
&& rule.allow_methods.iter().any(|x| x == "*" || x == method)
&& request_headers.all(|h| {
rule.allow_headers
.iter()
.any(|x| x == "*" || x == h.as_ref())
})
}
pub fn add_cors_headers(
resp: &mut Response<impl Body>,
rule: &GarageCorsRule,
) -> Result<(), http::header::InvalidHeaderValue> {
let h = resp.headers_mut();
h.insert(
ACCESS_CONTROL_ALLOW_ORIGIN,
rule.allow_origins.join(", ").parse()?,
);
h.insert(
ACCESS_CONTROL_ALLOW_METHODS,
rule.allow_methods.join(", ").parse()?,
);
h.insert(
ACCESS_CONTROL_ALLOW_HEADERS,
rule.allow_headers.join(", ").parse()?,
);
h.insert(
ACCESS_CONTROL_EXPOSE_HEADERS,
rule.expose_headers.join(", ").parse()?,
);
Ok(())
}
// ---- SERIALIZATION AND DESERIALIZATION TO/FROM S3 XML ---- // ---- SERIALIZATION AND DESERIALIZATION TO/FROM S3 XML ----
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]

View File

@ -5,13 +5,12 @@ use garage_util::data::*;
use garage_model::s3::object_table::*; use garage_model::s3::object_table::*;
use garage_api_common::helpers::*; use crate::helpers::*;
use garage_api_common::signature::verify_signed_content; use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;
use crate::api_server::{ReqBody, ResBody}; use crate::s3::put::next_timestamp;
use crate::error::*; use crate::s3::xml as s3_xml;
use crate::put::next_timestamp; use crate::signature::verify_signed_content;
use crate::xml as s3_xml;
async fn handle_delete_internal(ctx: &ReqCtx, key: &str) -> Result<(Uuid, Uuid), Error> { async fn handle_delete_internal(ctx: &ReqCtx, key: &str) -> Result<(Uuid, Uuid), Error> {
let ReqCtx { let ReqCtx {

View File

@ -1,596 +0,0 @@
use std::borrow::Cow;
use std::convert::TryInto;
use std::pin::Pin;
use aes_gcm::{
aead::stream::{DecryptorLE31, EncryptorLE31, StreamLE31},
aead::{Aead, AeadCore, KeyInit, OsRng},
aes::cipher::crypto_common::rand_core::RngCore,
aes::cipher::typenum::Unsigned,
Aes256Gcm, Key, Nonce,
};
use base64::prelude::*;
use bytes::Bytes;
use futures::stream::Stream;
use futures::task;
use tokio::io::BufReader;
use http::header::{HeaderMap, HeaderName, HeaderValue};
use garage_net::bytes_buf::BytesBuf;
use garage_net::stream::{stream_asyncread, ByteStream};
use garage_rpc::rpc_helper::OrderTag;
use garage_util::data::Hash;
use garage_util::error::Error as GarageError;
use garage_util::migrate::Migrate;
use garage_model::garage::Garage;
use garage_model::s3::object_table::{ObjectVersionEncryption, ObjectVersionMetaInner};
use garage_api_common::common_error::*;
use crate::checksum::Md5Checksum;
use crate::error::Error;
const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: HeaderName =
HeaderName::from_static("x-amz-server-side-encryption-customer-algorithm");
const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY: HeaderName =
HeaderName::from_static("x-amz-server-side-encryption-customer-key");
const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5: HeaderName =
HeaderName::from_static("x-amz-server-side-encryption-customer-key-md5");
const X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: HeaderName =
HeaderName::from_static("x-amz-copy-source-server-side-encryption-customer-algorithm");
const X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY: HeaderName =
HeaderName::from_static("x-amz-copy-source-server-side-encryption-customer-key");
const X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5: HeaderName =
HeaderName::from_static("x-amz-copy-source-server-side-encryption-customer-key-md5");
const CUSTOMER_ALGORITHM_AES256: &[u8] = b"AES256";
type Md5Output = md5::digest::Output<md5::Md5Core>;
type StreamNonceSize = aes_gcm::aead::stream::NonceSize<Aes256Gcm, StreamLE31<Aes256Gcm>>;
// Data blocks are encrypted by smaller chunks of size 4096 bytes,
// so that data can be streamed when reading.
// This size has to be known and has to be constant, or data won't be
// readable anymore. DO NOT CHANGE THIS VALUE.
const STREAM_ENC_PLAIN_CHUNK_SIZE: usize = 0x1000; // 4096 bytes
const STREAM_ENC_CYPER_CHUNK_SIZE: usize = STREAM_ENC_PLAIN_CHUNK_SIZE + 16;
#[derive(Clone, Copy)]
pub enum EncryptionParams {
Plaintext,
SseC {
client_key: Key<Aes256Gcm>,
client_key_md5: Md5Output,
compression_level: Option<i32>,
},
}
impl EncryptionParams {
pub fn is_encrypted(&self) -> bool {
!matches!(self, Self::Plaintext)
}
pub fn is_same(a: &Self, b: &Self) -> bool {
let relevant_info = |x: &Self| match x {
Self::Plaintext => None,
Self::SseC {
client_key,
compression_level,
..
} => Some((*client_key, compression_level.is_some())),
};
relevant_info(a) == relevant_info(b)
}
pub fn new_from_headers(
garage: &Garage,
headers: &HeaderMap,
) -> Result<EncryptionParams, Error> {
let key = parse_request_headers(
headers,
&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,
&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
)?;
match key {
Some((client_key, client_key_md5)) => Ok(EncryptionParams::SseC {
client_key,
client_key_md5,
compression_level: garage.config.compression_level,
}),
None => Ok(EncryptionParams::Plaintext),
}
}
pub fn add_response_headers(&self, resp: &mut http::response::Builder) {
if let Self::SseC { client_key_md5, .. } = self {
let md5 = BASE64_STANDARD.encode(&client_key_md5);
resp.headers_mut().unwrap().insert(
X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,
HeaderValue::from_bytes(CUSTOMER_ALGORITHM_AES256).unwrap(),
);
resp.headers_mut().unwrap().insert(
X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
HeaderValue::from_bytes(md5.as_bytes()).unwrap(),
);
}
}
pub fn check_decrypt<'a>(
garage: &Garage,
headers: &HeaderMap,
obj_enc: &'a ObjectVersionEncryption,
) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> {
let key = parse_request_headers(
headers,
&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,
&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
&X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
)?;
Self::check_decrypt_common(garage, key, obj_enc)
}
pub fn check_decrypt_for_copy_source<'a>(
garage: &Garage,
headers: &HeaderMap,
obj_enc: &'a ObjectVersionEncryption,
) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> {
let key = parse_request_headers(
headers,
&X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,
&X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
&X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
)?;
Self::check_decrypt_common(garage, key, obj_enc)
}
fn check_decrypt_common<'a>(
garage: &Garage,
key: Option<(Key<Aes256Gcm>, Md5Output)>,
obj_enc: &'a ObjectVersionEncryption,
) -> Result<(Self, Cow<'a, ObjectVersionMetaInner>), Error> {
match (key, &obj_enc) {
(
Some((client_key, client_key_md5)),
ObjectVersionEncryption::SseC { inner, compressed },
) => {
let enc = Self::SseC {
client_key,
client_key_md5,
compression_level: if *compressed {
Some(garage.config.compression_level.unwrap_or(1))
} else {
None
},
};
let plaintext = enc.decrypt_blob(&inner)?;
let inner = ObjectVersionMetaInner::decode(&plaintext)
.ok_or_internal_error("Could not decode encrypted metadata")?;
Ok((enc, Cow::Owned(inner)))
}
(None, ObjectVersionEncryption::Plaintext { inner }) => {
Ok((Self::Plaintext, Cow::Borrowed(inner)))
}
(_, ObjectVersionEncryption::SseC { .. }) => {
Err(Error::bad_request("Object is encrypted"))
}
(Some(_), _) => {
// TODO: should this be an OK scenario?
Err(Error::bad_request("Trying to decrypt a plaintext object"))
}
}
}
pub fn encrypt_meta(
&self,
meta: ObjectVersionMetaInner,
) -> Result<ObjectVersionEncryption, Error> {
match self {
Self::SseC {
compression_level, ..
} => {
let plaintext = meta.encode().map_err(GarageError::from)?;
let ciphertext = self.encrypt_blob(&plaintext)?;
Ok(ObjectVersionEncryption::SseC {
inner: ciphertext.into_owned(),
compressed: compression_level.is_some(),
})
}
Self::Plaintext => Ok(ObjectVersionEncryption::Plaintext { inner: meta }),
}
}
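// Note added for clarity: the `compressed` flag stored with the encrypted
// metadata records whether the object's *data blocks* are compressed; the
// metadata blob itself is never compressed (see encrypt_blob below).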
// ---- generating object Etag values ----
pub fn etag_from_md5(&self, md5sum: &Option<Md5Checksum>) -> String {
match self {
Self::Plaintext => md5sum
.map(|x| hex::encode(&x[..]))
.expect("md5 digest should have been computed"),
Self::SseC { .. } => {
// AWS specifies that for encrypted objects, the Etag is not
// the md5sum of the data, but doesn't say what it is.
// So we just put some random bytes.
let mut random = [0u8; 16];
OsRng.fill_bytes(&mut random);
hex::encode(&random)
}
}
}
// ---- generic function for encrypting / decrypting blobs ----
// Prepends a randomly-generated nonce to the encrypted value.
// This is used for encrypting object metadata and inlined data for small objects.
// This does not compress anything.
pub fn encrypt_blob<'a>(&self, blob: &'a [u8]) -> Result<Cow<'a, [u8]>, Error> {
match self {
Self::SseC { client_key, .. } => {
let cipher = Aes256Gcm::new(&client_key);
let nonce = Aes256Gcm::generate_nonce(&mut OsRng);
let ciphertext = cipher
.encrypt(&nonce, blob)
.ok_or_internal_error("Encryption failed")?;
Ok(Cow::Owned([nonce.to_vec(), ciphertext].concat()))
}
Self::Plaintext => Ok(Cow::Borrowed(blob)),
}
}
pub fn decrypt_blob<'a>(&self, blob: &'a [u8]) -> Result<Cow<'a, [u8]>, Error> {
match self {
Self::SseC { client_key, .. } => {
let cipher = Aes256Gcm::new(&client_key);
let nonce_size = <Aes256Gcm as AeadCore>::NonceSize::to_usize();
let nonce = Nonce::from_slice(
blob.get(..nonce_size)
.ok_or_internal_error("invalid encrypted data")?,
);
let plaintext = cipher
.decrypt(nonce, &blob[nonce_size..])
.ok_or_bad_request(
"Invalid encryption key, could not decrypt object metadata.",
)?;
Ok(Cow::Owned(plaintext))
}
Self::Plaintext => Ok(Cow::Borrowed(blob)),
}
}
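// Resulting blob layout (note added for clarity, derived from the two
// functions above): a 12-byte AES-256-GCM nonce followed by the ciphertext
// and its 16-byte authentication tag, so an encrypted blob is exactly
// 28 bytes longer than its plaintext.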
// ---- function for encrypting / decrypting byte streams ----
/// Get a data block from the storage node, and decrypt+decompress it
/// if necessary. If the object is plaintext, just get it without any processing.
pub async fn get_block(
&self,
garage: &Garage,
hash: &Hash,
order: Option<OrderTag>,
) -> Result<ByteStream, GarageError> {
let raw_block = garage
.block_manager
.rpc_get_block_streaming(hash, order)
.await?;
Ok(self.decrypt_block_stream(raw_block))
}
pub fn decrypt_block_stream(&self, stream: ByteStream) -> ByteStream {
match self {
Self::Plaintext => stream,
Self::SseC {
client_key,
compression_level,
..
} => {
let plaintext = DecryptStream::new(stream, *client_key);
if compression_level.is_some() {
let reader = stream_asyncread(Box::pin(plaintext));
let reader = BufReader::new(reader);
let reader = async_compression::tokio::bufread::ZstdDecoder::new(reader);
Box::pin(tokio_util::io::ReaderStream::new(reader))
} else {
Box::pin(plaintext)
}
}
}
}
/// Encrypt a data block if encryption is set, for use before
/// putting the data blocks into storage
pub fn encrypt_block(&self, block: Bytes) -> Result<Bytes, Error> {
match self {
Self::Plaintext => Ok(block),
Self::SseC {
client_key,
compression_level,
..
} => {
let block = if let Some(level) = compression_level {
Cow::Owned(
garage_block::zstd_encode(block.as_ref(), *level)
.ok_or_internal_error("failed to compress data block")?,
)
} else {
Cow::Borrowed(block.as_ref())
};
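// Capacity heuristic: the ciphertext is the (possibly compressed) data plus
// the stream nonce and one 16-byte AES-GCM tag per encrypted chunk.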
let mut ret = Vec::with_capacity(block.len() + 32 + block.len() / 64);
let mut nonce: Nonce<StreamNonceSize> = Default::default();
OsRng.fill_bytes(&mut nonce);
ret.extend_from_slice(nonce.as_slice());
let mut cipher = EncryptorLE31::<Aes256Gcm>::new(&client_key, &nonce);
let mut iter = block.chunks(STREAM_ENC_PLAIN_CHUNK_SIZE).peekable();
if iter.peek().is_none() {
// Empty stream: we encrypt an empty last chunk
let chunk_enc = cipher
.encrypt_last(&[][..])
.ok_or_internal_error("failed to encrypt chunk")?;
ret.extend_from_slice(&chunk_enc);
} else {
loop {
let chunk = iter.next().unwrap();
if iter.peek().is_some() {
let chunk_enc = cipher
.encrypt_next(chunk)
.ok_or_internal_error("failed to encrypt chunk")?;
assert_eq!(chunk.len(), STREAM_ENC_PLAIN_CHUNK_SIZE);
assert_eq!(chunk_enc.len(), STREAM_ENC_CYPER_CHUNK_SIZE);
ret.extend_from_slice(&chunk_enc);
} else {
// use encrypt_last for the last chunk
let chunk_enc = cipher
.encrypt_last(chunk)
.ok_or_internal_error("failed to encrypt chunk")?;
ret.extend_from_slice(&chunk_enc);
break;
}
}
}
Ok(ret.into())
}
}
}
}
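// A back-of-the-envelope sketch (added for illustration; this helper is not
// part of the original file) of the ciphertext size that encrypt_block
// produces for an uncompressed input of `plain_len` bytes: the stream nonce,
// the data itself, and one 16-byte AES-GCM tag per chunk, with at least one
// (possibly empty) final chunk.
#[allow(dead_code)]
fn encrypted_block_len(plain_len: usize) -> usize {
let nonce_len = StreamNonceSize::to_usize();
let n_chunks = std::cmp::max(
1,
(plain_len + STREAM_ENC_PLAIN_CHUNK_SIZE - 1) / STREAM_ENC_PLAIN_CHUNK_SIZE,
);
nonce_len + plain_len + 16 * n_chunks
}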
fn parse_request_headers(
headers: &HeaderMap,
alg_header: &HeaderName,
key_header: &HeaderName,
md5_header: &HeaderName,
) -> Result<Option<(Key<Aes256Gcm>, Md5Output)>, Error> {
let alg = headers.get(alg_header).map(HeaderValue::as_bytes);
let key = headers.get(key_header).map(HeaderValue::as_bytes);
let md5 = headers.get(md5_header).map(HeaderValue::as_bytes);
match alg {
Some(CUSTOMER_ALGORITHM_AES256) => {
use md5::{Digest, Md5};
let key_b64 =
key.ok_or_bad_request("Missing server-side-encryption-customer-key header")?;
let key_bytes: [u8; 32] = BASE64_STANDARD
.decode(&key_b64)
.ok_or_bad_request(
"Invalid server-side-encryption-customer-key header: invalid base64",
)?
.try_into()
.ok()
.ok_or_bad_request(
"Invalid server-side-encryption-customer-key header: invalid length",
)?;
let md5_b64 =
md5.ok_or_bad_request("Missing server-side-encryption-customer-key-md5 header")?;
let md5_bytes = BASE64_STANDARD.decode(&md5_b64).ok_or_bad_request(
"Invalid server-side-encryption-customer-key-md5 header: invalid bass64",
)?;
let mut hasher = Md5::new();
hasher.update(&key_bytes[..]);
let our_md5 = hasher.finalize();
if our_md5.as_slice() != md5_bytes.as_slice() {
return Err(Error::bad_request(
"Server-side encryption client key MD5 checksum does not match",
));
}
Ok(Some((key_bytes.into(), our_md5)))
}
Some(alg) => Err(Error::InvalidEncryptionAlgorithm(
String::from_utf8_lossy(alg).into_owned(),
)),
None => {
if key.is_some() || md5.is_some() {
Err(Error::bad_request(
"Unexpected server-side-encryption-customer-key{,-md5} header(s)",
))
} else {
Ok(None)
}
}
}
}
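// For reference (illustrative values), a request accepted by
// parse_request_headers carries:
//   x-amz-server-side-encryption-customer-algorithm: AES256
//   x-amz-server-side-encryption-customer-key: <base64 of the 32-byte key>
//   x-amz-server-side-encryption-customer-key-md5: <base64 of MD5(key)>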
// ---- encrypt & decrypt streams ----
#[pin_project::pin_project]
struct DecryptStream {
#[pin]
stream: ByteStream,
done_reading: bool,
buf: BytesBuf,
key: Key<Aes256Gcm>,
state: DecryptStreamState,
}
enum DecryptStreamState {
Starting,
Running(DecryptorLE31<Aes256Gcm>),
Done,
}
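// Note added for clarity: this stream undoes exactly the framing produced by
// encrypt_block above: a StreamNonceSize nonce, then chunks of
// STREAM_ENC_CYPER_CHUNK_SIZE bytes, the last of which may be shorter.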
impl DecryptStream {
fn new(stream: ByteStream, key: Key<Aes256Gcm>) -> Self {
Self {
stream,
done_reading: false,
buf: BytesBuf::new(),
key,
state: DecryptStreamState::Starting,
}
}
}
impl Stream for DecryptStream {
type Item = Result<Bytes, std::io::Error>;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> task::Poll<Option<Self::Item>> {
use std::task::Poll;
let mut this = self.project();
// The first bytes of the stream should contain the starting nonce.
// If we don't have a Running state, it means that we haven't
// yet read the nonce.
while matches!(this.state, DecryptStreamState::Starting) {
let nonce_size = StreamNonceSize::to_usize();
if let Some(nonce) = this.buf.take_exact(nonce_size) {
let nonce = Nonce::from_slice(nonce.as_ref());
*this.state = DecryptStreamState::Running(DecryptorLE31::new(&this.key, nonce));
break;
}
match futures::ready!(this.stream.as_mut().poll_next(cx)) {
Some(Ok(bytes)) => {
this.buf.extend(bytes);
}
Some(Err(e)) => {
return Poll::Ready(Some(Err(e)));
}
None => {
return Poll::Ready(Some(Err(std::io::Error::new(
std::io::ErrorKind::UnexpectedEof,
"Decrypt: unexpected EOF, could not read nonce",
))));
}
}
}
// Read at least one byte more than the encrypted chunk size
// (if possible), so that we know if we are decrypting the
// last chunk or not.
while !*this.done_reading && this.buf.len() <= STREAM_ENC_CYPER_CHUNK_SIZE {
match futures::ready!(this.stream.as_mut().poll_next(cx)) {
Some(Ok(bytes)) => {
this.buf.extend(bytes);
}
Some(Err(e)) => {
return Poll::Ready(Some(Err(e)));
}
None => {
*this.done_reading = true;
break;
}
}
}
if matches!(this.state, DecryptStreamState::Done) {
if !this.buf.is_empty() {
return Poll::Ready(Some(Err(std::io::Error::new(
std::io::ErrorKind::Other,
"Decrypt: unexpected bytes after last encrypted chunk",
))));
}
return Poll::Ready(None);
}
let res = if this.buf.len() > STREAM_ENC_CYPER_CHUNK_SIZE {
// we have strictly more bytes than the encrypted chunk size,
// so we know this is not the last chunk
let DecryptStreamState::Running(ref mut cipher) = this.state else {
unreachable!()
};
let chunk = this.buf.take_exact(STREAM_ENC_CYPER_CHUNK_SIZE).unwrap();
let chunk_dec = cipher.decrypt_next(chunk.as_ref());
if let Ok(c) = &chunk_dec {
assert_eq!(c.len(), STREAM_ENC_PLAIN_CHUNK_SIZE);
}
chunk_dec
} else {
// We have one encrypted chunk size or less, even though we tried
// to read more, so this is the last chunk. Decrypt using the
// appropriate decrypt_last() function that then destroys the cipher.
let state = std::mem::replace(this.state, DecryptStreamState::Done);
let DecryptStreamState::Running(cipher) = state else {
unreachable!()
};
let chunk = this.buf.take_all();
cipher.decrypt_last(chunk.as_ref())
};
match res {
Ok(bytes) if bytes.is_empty() => Poll::Ready(None),
Ok(bytes) => Poll::Ready(Some(Ok(bytes.into()))),
Err(_) => Poll::Ready(Some(Err(std::io::Error::new(
std::io::ErrorKind::Other,
"Decryption failed",
)))),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use futures::stream::StreamExt;
use garage_net::stream::read_stream_to_end;
fn stream() -> ByteStream {
Box::pin(
futures::stream::iter(16usize..1024)
.map(|i| Ok(Bytes::from(vec![(i % 256) as u8; (i * 37) % 1024]))),
)
}
async fn test_block_enc(compression_level: Option<i32>) {
let enc = EncryptionParams::SseC {
client_key: Aes256Gcm::generate_key(&mut OsRng),
client_key_md5: Default::default(), // not needed
compression_level,
};
let block_plain = read_stream_to_end(stream()).await.unwrap().into_bytes();
let block_enc = enc.encrypt_block(block_plain.clone()).unwrap();
let block_dec =
enc.decrypt_block_stream(Box::pin(futures::stream::once(async { Ok(block_enc) })));
let block_dec = read_stream_to_end(block_dec).await.unwrap().into_bytes();
assert_eq!(block_plain, block_dec);
assert!(block_dec.len() > 128000);
}
#[tokio::test]
async fn test_encrypt_block() {
test_block_enc(None).await
}
#[tokio::test]
async fn test_encrypt_block_compressed() {
test_block_enc(Some(1)).await
}
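// A minimal round-trip sketch for the blob helpers (an added test, not part
// of the original suite): encrypt_blob prepends a random nonce, so the
// ciphertext is strictly longer than the plaintext, and decrypt_blob must
// recover the original bytes.
#[test]
fn test_blob_enc_roundtrip() {
let enc = EncryptionParams::SseC {
client_key: Aes256Gcm::generate_key(&mut OsRng),
client_key_md5: Default::default(), // not needed
compression_level: None,
};
let plaintext = b"garage blob encryption".to_vec();
let ciphertext = enc.encrypt_blob(&plaintext).unwrap();
assert!(ciphertext.len() > plaintext.len());
let decrypted = enc.decrypt_blob(&ciphertext).unwrap();
assert_eq!(&plaintext[..], decrypted.as_ref());
}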
}

View File

@ -4,30 +4,19 @@ use err_derive::Error;
use hyper::header::HeaderValue; use hyper::header::HeaderValue;
use hyper::{HeaderMap, StatusCode}; use hyper::{HeaderMap, StatusCode};
use garage_model::helper::error::Error as HelperError; use crate::common_error::CommonError;
pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
pub(crate) use garage_api_common::common_error::pass_helper_error; use crate::generic_server::ApiError;
use crate::helpers::*;
use garage_api_common::common_error::{ use crate::s3::xml as s3_xml;
commonErrorDerivative, helper_error_as_internal, CommonError, use crate::signature::error::Error as SignatureError;
};
pub use garage_api_common::common_error::{
CommonErrorDerivative, OkOrBadRequest, OkOrInternalError,
};
use garage_api_common::generic_server::ApiError;
use garage_api_common::helpers::*;
use garage_api_common::signature::error::Error as SignatureError;
use crate::xml as s3_xml;
/// Errors of this crate /// Errors of this crate
#[derive(Debug, Error)] #[derive(Debug, Error)]
pub enum Error { pub enum Error {
#[error(display = "{}", _0)] #[error(display = "{}", _0)]
/// Error from common error /// Error from common error
Common(#[error(source)] CommonError), Common(CommonError),
// Category: cannot process // Category: cannot process
/// Authorization Header Malformed /// Authorization Header Malformed
@ -76,28 +65,21 @@ pub enum Error {
#[error(display = "Invalid HTTP range: {:?}", _0)] #[error(display = "Invalid HTTP range: {:?}", _0)]
InvalidRange(#[error(from)] (http_range::HttpRangeParseError, u64)), InvalidRange(#[error(from)] (http_range::HttpRangeParseError, u64)),
/// The client sent an invalid server-side encryption algorithm
#[error(display = "Invalid encryption algorithm: {:?}, should be AES256", _0)]
InvalidEncryptionAlgorithm(String),
/// The client sent an invalid digest value
#[error(display = "Invalid digest: {}", _0)]
InvalidDigest(String),
/// The client sent a request for an action not supported by garage /// The client sent a request for an action not supported by garage
#[error(display = "Unimplemented action: {}", _0)] #[error(display = "Unimplemented action: {}", _0)]
NotImplemented(String), NotImplemented(String),
} }
commonErrorDerivative!(Error); impl<T> From<T> for Error
where
CommonError: From<T>,
{
fn from(err: T) -> Self {
Error::Common(CommonError::from(err))
}
}
// Helper errors are always passed as internal errors by default. impl CommonErrorDerivative for Error {}
// To pass the specific error code back to the client, use `pass_helper_error`.
impl From<HelperError> for Error {
fn from(err: HelperError) -> Error {
Error::Common(helper_error_as_internal(err))
}
}
impl From<roxmltree::Error> for Error { impl From<roxmltree::Error> for Error {
fn from(err: roxmltree::Error) -> Self { fn from(err: roxmltree::Error) -> Self {
@ -143,9 +125,7 @@ impl Error {
Error::NotImplemented(_) => "NotImplemented", Error::NotImplemented(_) => "NotImplemented",
Error::InvalidXml(_) => "MalformedXML", Error::InvalidXml(_) => "MalformedXML",
Error::InvalidRange(_) => "InvalidRange", Error::InvalidRange(_) => "InvalidRange",
Error::InvalidDigest(_) => "InvalidDigest",
Error::InvalidUtf8Str(_) | Error::InvalidUtf8String(_) => "InvalidRequest", Error::InvalidUtf8Str(_) | Error::InvalidUtf8String(_) => "InvalidRequest",
Error::InvalidEncryptionAlgorithm(_) => "InvalidEncryptionAlgorithmError",
} }
} }
} }
@ -163,8 +143,6 @@ impl ApiError for Error {
| Error::InvalidPart | Error::InvalidPart
| Error::InvalidPartOrder | Error::InvalidPartOrder
| Error::EntityTooSmall | Error::EntityTooSmall
| Error::InvalidDigest(_)
| Error::InvalidEncryptionAlgorithm(_)
| Error::InvalidXml(_) | Error::InvalidXml(_)
| Error::InvalidUtf8Str(_) | Error::InvalidUtf8Str(_)
| Error::InvalidUtf8String(_) => StatusCode::BAD_REQUEST, | Error::InvalidUtf8String(_) => StatusCode::BAD_REQUEST,

View File

@ -1,12 +1,10 @@
//! Function related to GET and HEAD requests //! Function related to GET and HEAD requests
use std::collections::BTreeMap;
use std::convert::TryInto; use std::convert::TryInto;
use std::sync::Arc; use std::sync::Arc;
use std::time::{Duration, UNIX_EPOCH}; use std::time::{Duration, UNIX_EPOCH};
use bytes::Bytes;
use futures::future; use futures::future;
use futures::stream::{self, Stream, StreamExt}; use futures::stream::{self, StreamExt};
use http::header::{ use http::header::{
ACCEPT_RANGES, CACHE_CONTROL, CONTENT_DISPOSITION, CONTENT_ENCODING, CONTENT_LANGUAGE, ACCEPT_RANGES, CACHE_CONTROL, CONTENT_DISPOSITION, CONTENT_ENCODING, CONTENT_LANGUAGE,
CONTENT_LENGTH, CONTENT_RANGE, CONTENT_TYPE, ETAG, EXPIRES, IF_MODIFIED_SINCE, IF_NONE_MATCH, CONTENT_LENGTH, CONTENT_RANGE, CONTENT_TYPE, ETAG, EXPIRES, IF_MODIFIED_SINCE, IF_NONE_MATCH,
@ -25,12 +23,9 @@ use garage_model::garage::Garage;
use garage_model::s3::object_table::*; use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*; use garage_model::s3::version_table::*;
use garage_api_common::helpers::*; use crate::helpers::*;
use crate::s3::api_server::ResBody;
use crate::api_server::ResBody; use crate::s3::error::*;
use crate::checksum::{add_checksum_response_headers, X_AMZ_CHECKSUM_MODE};
use crate::encryption::EncryptionParams;
use crate::error::*;
const X_AMZ_MP_PARTS_COUNT: &str = "x-amz-mp-parts-count"; const X_AMZ_MP_PARTS_COUNT: &str = "x-amz-mp-parts-count";
@ -47,9 +42,6 @@ pub struct GetObjectOverrides {
fn object_headers( fn object_headers(
version: &ObjectVersion, version: &ObjectVersion,
version_meta: &ObjectVersionMeta, version_meta: &ObjectVersionMeta,
meta_inner: &ObjectVersionMetaInner,
encryption: EncryptionParams,
checksum_mode: ChecksumMode,
) -> http::response::Builder { ) -> http::response::Builder {
debug!("Version meta: {:?}", version_meta); debug!("Version meta: {:?}", version_meta);
@ -57,6 +49,7 @@ fn object_headers(
let date_str = httpdate::fmt_http_date(date); let date_str = httpdate::fmt_http_date(date);
let mut resp = Response::builder() let mut resp = Response::builder()
.header(CONTENT_TYPE, version_meta.headers.content_type.to_string())
.header(LAST_MODIFIED, date_str) .header(LAST_MODIFIED, date_str)
.header(ACCEPT_RANGES, "bytes".to_string()); .header(ACCEPT_RANGES, "bytes".to_string());
@ -64,28 +57,10 @@ fn object_headers(
resp = resp.header(ETAG, format!("\"{}\"", version_meta.etag)); resp = resp.header(ETAG, format!("\"{}\"", version_meta.etag));
} }
// When metadata is retrieved through the REST API, Amazon S3 combines headers that for (k, v) in version_meta.headers.other.iter() {
// have the same name (ignoring case) into a comma-delimited list. resp = resp.header(k, v.to_string());
// See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html
let mut headers_by_name = BTreeMap::new();
for (name, value) in meta_inner.headers.iter() {
let name_lower = name.to_ascii_lowercase();
headers_by_name
.entry(name_lower)
.or_insert(vec![])
.push(value.as_str());
} }
for (name, values) in headers_by_name {
resp = resp.header(name, values.join(","));
}
if checksum_mode.enabled {
resp = add_checksum_response_headers(&meta_inner.checksum, resp);
}
encryption.add_response_headers(&mut resp);
resp resp
} }
@ -200,29 +175,17 @@ pub async fn handle_head_without_ctx(
return Ok(cached); return Ok(cached);
} }
let (encryption, headers) =
EncryptionParams::check_decrypt(&garage, req.headers(), &version_meta.encryption)?;
let checksum_mode = checksum_mode(&req);
if let Some(pn) = part_number { if let Some(pn) = part_number {
match version_data { match version_data {
ObjectVersionData::Inline(_, _) => { ObjectVersionData::Inline(_, bytes) => {
if pn != 1 { if pn != 1 {
return Err(Error::InvalidPart); return Err(Error::InvalidPart);
} }
let bytes_len = version_meta.size; Ok(object_headers(object_version, version_meta)
Ok(object_headers( .header(CONTENT_LENGTH, format!("{}", bytes.len()))
object_version,
version_meta,
&headers,
encryption,
checksum_mode,
)
.header(CONTENT_LENGTH, format!("{}", bytes_len))
.header( .header(
CONTENT_RANGE, CONTENT_RANGE,
format!("bytes 0-{}/{}", bytes_len - 1, bytes_len), format!("bytes 0-{}/{}", bytes.len() - 1, bytes.len()),
) )
.header(X_AMZ_MP_PARTS_COUNT, "1") .header(X_AMZ_MP_PARTS_COUNT, "1")
.status(StatusCode::PARTIAL_CONTENT) .status(StatusCode::PARTIAL_CONTENT)
@ -238,13 +201,7 @@ pub async fn handle_head_without_ctx(
let (part_offset, part_end) = let (part_offset, part_end) =
calculate_part_bounds(&version, pn).ok_or(Error::InvalidPart)?; calculate_part_bounds(&version, pn).ok_or(Error::InvalidPart)?;
Ok(object_headers( Ok(object_headers(object_version, version_meta)
object_version,
version_meta,
&headers,
encryption,
checksum_mode,
)
.header(CONTENT_LENGTH, format!("{}", part_end - part_offset)) .header(CONTENT_LENGTH, format!("{}", part_end - part_offset))
.header( .header(
CONTENT_RANGE, CONTENT_RANGE,
@ -262,13 +219,7 @@ pub async fn handle_head_without_ctx(
_ => unreachable!(), _ => unreachable!(),
} }
} else { } else {
Ok(object_headers( Ok(object_headers(object_version, version_meta)
object_version,
version_meta,
&headers,
encryption,
checksum_mode,
)
.header(CONTENT_LENGTH, format!("{}", version_meta.size)) .header(CONTENT_LENGTH, format!("{}", version_meta.size))
.status(StatusCode::OK) .status(StatusCode::OK)
.body(empty_body())?) .body(empty_body())?)
@ -322,55 +273,23 @@ pub async fn handle_get_without_ctx(
return Ok(cached); return Ok(cached);
} }
let (enc, headers) =
EncryptionParams::check_decrypt(&garage, req.headers(), &last_v_meta.encryption)?;
let checksum_mode = checksum_mode(&req);
match (part_number, parse_range_header(req, last_v_meta.size)?) { match (part_number, parse_range_header(req, last_v_meta.size)?) {
(Some(_), Some(_)) => Err(Error::bad_request( (Some(_), Some(_)) => Err(Error::bad_request(
"Cannot specify both partNumber and Range header", "Cannot specify both partNumber and Range header",
)), )),
(Some(pn), None) => { (Some(pn), None) => handle_get_part(garage, last_v, last_v_data, last_v_meta, pn).await,
handle_get_part(
garage,
last_v,
last_v_data,
last_v_meta,
enc,
&headers,
pn,
checksum_mode,
)
.await
}
(None, Some(range)) => { (None, Some(range)) => {
handle_get_range( handle_get_range(
garage, garage,
last_v, last_v,
last_v_data, last_v_data,
last_v_meta, last_v_meta,
enc,
&headers,
range.start, range.start,
range.start + range.length, range.start + range.length,
checksum_mode,
)
.await
}
(None, None) => {
handle_get_full(
garage,
last_v,
last_v_data,
last_v_meta,
enc,
&headers,
overrides,
checksum_mode,
) )
.await .await
} }
(None, None) => handle_get_full(garage, last_v, last_v_data, last_v_meta, overrides).await,
} }
} }
@ -379,43 +298,17 @@ async fn handle_get_full(
version: &ObjectVersion, version: &ObjectVersion,
version_data: &ObjectVersionData, version_data: &ObjectVersionData,
version_meta: &ObjectVersionMeta, version_meta: &ObjectVersionMeta,
encryption: EncryptionParams,
meta_inner: &ObjectVersionMetaInner,
overrides: GetObjectOverrides, overrides: GetObjectOverrides,
checksum_mode: ChecksumMode,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
let mut resp_builder = object_headers( let mut resp_builder = object_headers(version, version_meta)
version,
version_meta,
&meta_inner,
encryption,
checksum_mode,
)
.header(CONTENT_LENGTH, format!("{}", version_meta.size)) .header(CONTENT_LENGTH, format!("{}", version_meta.size))
.status(StatusCode::OK); .status(StatusCode::OK);
getobject_override_headers(overrides, &mut resp_builder)?; getobject_override_headers(overrides, &mut resp_builder)?;
let stream = full_object_byte_stream(garage, version, version_data, encryption);
Ok(resp_builder.body(response_body_from_stream(stream))?)
}
pub fn full_object_byte_stream(
garage: Arc<Garage>,
version: &ObjectVersion,
version_data: &ObjectVersionData,
encryption: EncryptionParams,
) -> ByteStream {
match &version_data { match &version_data {
ObjectVersionData::DeleteMarker => unreachable!(), ObjectVersionData::DeleteMarker => unreachable!(),
ObjectVersionData::Inline(_, bytes) => { ObjectVersionData::Inline(_, bytes) => {
let bytes = bytes.to_vec(); Ok(resp_builder.body(bytes_body(bytes.to_vec().into()))?)
Box::pin(futures::stream::once(async move {
encryption
.decrypt_blob(&bytes)
.map(|x| Bytes::from(x.to_vec()))
.map_err(std_error_from_read_error)
}))
} }
ObjectVersionData::FirstBlock(_, first_block_hash) => { ObjectVersionData::FirstBlock(_, first_block_hash) => {
let (tx, rx) = mpsc::channel::<ByteStream>(2); let (tx, rx) = mpsc::channel::<ByteStream>(2);
@ -431,18 +324,19 @@ pub fn full_object_byte_stream(
garage2.version_table.get(&version_uuid, &EmptyKey).await garage2.version_table.get(&version_uuid, &EmptyKey).await
}); });
let stream_block_0 = encryption let stream_block_0 = garage
.get_block(&garage, &first_block_hash, Some(order_stream.order(0))) .block_manager
.rpc_get_block_streaming(&first_block_hash, Some(order_stream.order(0)))
.await?; .await?;
tx.send(stream_block_0) tx.send(stream_block_0)
.await .await
.ok_or_message("channel closed")?; .ok_or_message("channel closed")?;
let version = version_fut.await.unwrap()?.ok_or(Error::NoSuchKey)?; let version = version_fut.await.unwrap()?.ok_or(Error::NoSuchKey)?;
for (i, (_, vb)) in version.blocks.items().iter().enumerate().skip(1) { for (i, (_, vb)) in version.blocks.items().iter().enumerate().skip(1) {
let stream_block_i = encryption let stream_block_i = garage
.get_block(&garage, &vb.hash, Some(order_stream.order(i as u64))) .block_manager
.rpc_get_block_streaming(&vb.hash, Some(order_stream.order(i as u64)))
.await?; .await?;
tx.send(stream_block_i) tx.send(stream_block_i)
.await .await
@ -460,7 +354,8 @@ pub fn full_object_byte_stream(
} }
}); });
Box::pin(tokio_stream::wrappers::ReceiverStream::new(rx).flatten()) let body = response_body_from_block_stream(rx);
Ok(resp_builder.body(body)?)
} }
} }
} }
@ -470,16 +365,13 @@ async fn handle_get_range(
version: &ObjectVersion, version: &ObjectVersion,
version_data: &ObjectVersionData, version_data: &ObjectVersionData,
version_meta: &ObjectVersionMeta, version_meta: &ObjectVersionMeta,
encryption: EncryptionParams,
meta_inner: &ObjectVersionMetaInner,
begin: u64, begin: u64,
end: u64, end: u64,
checksum_mode: ChecksumMode,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
// Here we do not use getobject_override_headers because we don't // Here we do not use getobject_override_headers because we don't
// want to add any overridden headers (those should not be added // want to add any overridden headers (those should not be added
// when returning PARTIAL_CONTENT) // when returning PARTIAL_CONTENT)
let resp_builder = object_headers(version, version_meta, meta_inner, encryption, checksum_mode) let resp_builder = object_headers(version, version_meta)
.header(CONTENT_LENGTH, format!("{}", end - begin)) .header(CONTENT_LENGTH, format!("{}", end - begin))
.header( .header(
CONTENT_RANGE, CONTENT_RANGE,
@ -490,7 +382,6 @@ async fn handle_get_range(
match &version_data { match &version_data {
ObjectVersionData::DeleteMarker => unreachable!(), ObjectVersionData::DeleteMarker => unreachable!(),
ObjectVersionData::Inline(_meta, bytes) => { ObjectVersionData::Inline(_meta, bytes) => {
let bytes = encryption.decrypt_blob(&bytes)?;
if end as usize <= bytes.len() { if end as usize <= bytes.len() {
let body = bytes_body(bytes[begin as usize..end as usize].to_vec().into()); let body = bytes_body(bytes[begin as usize..end as usize].to_vec().into());
Ok(resp_builder.body(body)?) Ok(resp_builder.body(body)?)
@ -507,8 +398,7 @@ async fn handle_get_range(
.await? .await?
.ok_or(Error::NoSuchKey)?; .ok_or(Error::NoSuchKey)?;
let body = let body = body_from_blocks_range(garage, version.blocks.items(), begin, end);
body_from_blocks_range(garage, encryption, version.blocks.items(), begin, end);
Ok(resp_builder.body(body)?) Ok(resp_builder.body(body)?)
} }
} }
@ -519,28 +409,17 @@ async fn handle_get_part(
object_version: &ObjectVersion, object_version: &ObjectVersion,
version_data: &ObjectVersionData, version_data: &ObjectVersionData,
version_meta: &ObjectVersionMeta, version_meta: &ObjectVersionMeta,
encryption: EncryptionParams,
meta_inner: &ObjectVersionMetaInner,
part_number: u64, part_number: u64,
checksum_mode: ChecksumMode,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
// Same as for get_range, no getobject_override_headers // Same as for get_range, no getobject_override_headers
let resp_builder = object_headers( let resp_builder =
object_version, object_headers(object_version, version_meta).status(StatusCode::PARTIAL_CONTENT);
version_meta,
meta_inner,
encryption,
checksum_mode,
)
.status(StatusCode::PARTIAL_CONTENT);
match version_data { match version_data {
ObjectVersionData::Inline(_, bytes) => { ObjectVersionData::Inline(_, bytes) => {
if part_number != 1 { if part_number != 1 {
return Err(Error::InvalidPart); return Err(Error::InvalidPart);
} }
let bytes = encryption.decrypt_blob(&bytes)?;
assert_eq!(bytes.len() as u64, version_meta.size);
Ok(resp_builder Ok(resp_builder
.header(CONTENT_LENGTH, format!("{}", bytes.len())) .header(CONTENT_LENGTH, format!("{}", bytes.len()))
.header( .header(
@ -548,7 +427,7 @@ async fn handle_get_part(
format!("bytes {}-{}/{}", 0, bytes.len() - 1, bytes.len()), format!("bytes {}-{}/{}", 0, bytes.len() - 1, bytes.len()),
) )
.header(X_AMZ_MP_PARTS_COUNT, "1") .header(X_AMZ_MP_PARTS_COUNT, "1")
.body(bytes_body(bytes.into_owned().into()))?) .body(bytes_body(bytes.to_vec().into()))?)
} }
ObjectVersionData::FirstBlock(_, _) => { ObjectVersionData::FirstBlock(_, _) => {
let version = garage let version = garage
@ -560,8 +439,7 @@ async fn handle_get_part(
let (begin, end) = let (begin, end) =
calculate_part_bounds(&version, part_number).ok_or(Error::InvalidPart)?; calculate_part_bounds(&version, part_number).ok_or(Error::InvalidPart)?;
let body = let body = body_from_blocks_range(garage, version.blocks.items(), begin, end);
body_from_blocks_range(garage, encryption, version.blocks.items(), begin, end);
Ok(resp_builder Ok(resp_builder
.header(CONTENT_LENGTH, format!("{}", end - begin)) .header(CONTENT_LENGTH, format!("{}", end - begin))
@ -614,23 +492,8 @@ fn calculate_part_bounds(v: &Version, part_number: u64) -> Option<(u64, u64)> {
None None
} }
struct ChecksumMode {
enabled: bool,
}
fn checksum_mode(req: &Request<impl Body>) -> ChecksumMode {
ChecksumMode {
enabled: req
.headers()
.get(X_AMZ_CHECKSUM_MODE)
.map(|x| x == "ENABLED")
.unwrap_or(false),
}
}
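// (Note added for clarity) Checksum response headers are only emitted when the
// client sends "x-amz-checksum-mode: ENABLED"; any other value, or no header,
// disables them.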
fn body_from_blocks_range( fn body_from_blocks_range(
garage: Arc<Garage>, garage: Arc<Garage>,
encryption: EncryptionParams,
all_blocks: &[(VersionBlockKey, VersionBlock)], all_blocks: &[(VersionBlockKey, VersionBlock)],
begin: u64, begin: u64,
end: u64, end: u64,
@ -660,11 +523,12 @@ fn body_from_blocks_range(
tokio::spawn(async move { tokio::spawn(async move {
match async { match async {
let garage = garage.clone();
for (i, (block, block_offset)) in blocks.iter().enumerate() { for (i, (block, block_offset)) in blocks.iter().enumerate() {
let block_stream = encryption let block_stream = garage
.get_block(&garage, &block.hash, Some(order_stream.order(i as u64))) .block_manager
.await?; .rpc_get_block_streaming(&block.hash, Some(order_stream.order(i as u64)))
let block_stream = block_stream .await?
.scan(*block_offset, move |chunk_offset, chunk| { .scan(*block_offset, move |chunk_offset, chunk| {
let r = match chunk { let r = match chunk {
Ok(chunk_bytes) => { Ok(chunk_bytes) => {
@ -724,15 +588,9 @@ fn body_from_blocks_range(
} }
fn response_body_from_block_stream(rx: mpsc::Receiver<ByteStream>) -> ResBody { fn response_body_from_block_stream(rx: mpsc::Receiver<ByteStream>) -> ResBody {
let body_stream = tokio_stream::wrappers::ReceiverStream::new(rx).flatten(); let body_stream = tokio_stream::wrappers::ReceiverStream::new(rx)
response_body_from_stream(body_stream) .flatten()
} .map(|x| {
fn response_body_from_stream<S>(stream: S) -> ResBody
where
S: Stream<Item = Result<Bytes, std::io::Error>> + Send + Sync + 'static,
{
let body_stream = stream.map(|x| {
x.map(hyper::body::Frame::data) x.map(hyper::body::Frame::data)
.map_err(|e| Error::from(garage_util::error::Error::from(e))) .map_err(|e| Error::from(garage_util::error::Error::from(e)))
}); });
@ -740,14 +598,9 @@ where
} }
fn error_stream_item<E: std::fmt::Display>(e: E) -> ByteStream { fn error_stream_item<E: std::fmt::Display>(e: E) -> ByteStream {
Box::pin(stream::once(future::ready(Err(std_error_from_read_error( let err = std::io::Error::new(
e,
)))))
}
fn std_error_from_read_error<E: std::fmt::Display>(e: E) -> std::io::Error {
std::io::Error::new(
std::io::ErrorKind::Other, std::io::ErrorKind::Other,
format!("Error while reading object data: {}", e), format!("Error while getting object data: {}", e),
) );
Box::pin(stream::once(future::ready(Err(err))))
} }

View File

@ -5,12 +5,11 @@ use hyper::{Request, Response, StatusCode};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use garage_api_common::helpers::*; use crate::helpers::*;
use garage_api_common::signature::verify_signed_content; use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;
use crate::api_server::{ReqBody, ResBody}; use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
use crate::error::*; use crate::signature::verify_signed_content;
use crate::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
use garage_model::bucket_table::{ use garage_model::bucket_table::{
parse_lifecycle_date, Bucket, LifecycleExpiration as GarageLifecycleExpiration, parse_lifecycle_date, Bucket, LifecycleExpiration as GarageLifecycleExpiration,

View File

@ -2,7 +2,7 @@ use std::collections::{BTreeMap, BTreeSet};
use std::iter::{Iterator, Peekable}; use std::iter::{Iterator, Peekable};
use base64::prelude::*; use base64::prelude::*;
use hyper::{Request, Response}; use hyper::Response;
use garage_util::data::*; use garage_util::data::*;
use garage_util::error::Error as GarageError; use garage_util::error::Error as GarageError;
@ -13,14 +13,12 @@ use garage_model::s3::object_table::*;
use garage_table::EnumerationOrder; use garage_table::EnumerationOrder;
use garage_api_common::encoding::*; use crate::encoding::*;
use garage_api_common::helpers::*; use crate::helpers::*;
use crate::s3::api_server::ResBody;
use crate::api_server::{ReqBody, ResBody}; use crate::s3::error::*;
use crate::encryption::EncryptionParams; use crate::s3::multipart as s3_multipart;
use crate::error::*; use crate::s3::xml as s3_xml;
use crate::multipart as s3_multipart;
use crate::xml as s3_xml;
const DUMMY_NAME: &str = "Dummy Key"; const DUMMY_NAME: &str = "Dummy Key";
const DUMMY_KEY: &str = "GKDummyKey"; const DUMMY_KEY: &str = "GKDummyKey";
@ -273,21 +271,13 @@ pub async fn handle_list_multipart_upload(
pub async fn handle_list_parts( pub async fn handle_list_parts(
ctx: ReqCtx, ctx: ReqCtx,
req: Request<ReqBody>,
query: &ListPartsQuery, query: &ListPartsQuery,
) -> Result<Response<ResBody>, Error> { ) -> Result<Response<ResBody>, Error> {
debug!("ListParts {:?}", query); debug!("ListParts {:?}", query);
let upload_id = s3_multipart::decode_upload_id(&query.upload_id)?; let upload_id = s3_multipart::decode_upload_id(&query.upload_id)?;
let (_, object_version, mpu) = s3_multipart::get_upload(&ctx, &query.key, &upload_id).await?; let (_, _, mpu) = s3_multipart::get_upload(&ctx, &query.key, &upload_id).await?;
let object_encryption = match object_version.state {
ObjectVersionState::Uploading { encryption, .. } => encryption,
_ => unreachable!(),
};
let encryption_res =
EncryptionParams::check_decrypt(&ctx.garage, req.headers(), &object_encryption);
let (info, next) = fetch_part_info(query, &mpu)?; let (info, next) = fetch_part_info(query, &mpu)?;
@ -306,40 +296,11 @@ pub async fn handle_list_parts(
is_truncated: s3_xml::Value(format!("{}", next.is_some())), is_truncated: s3_xml::Value(format!("{}", next.is_some())),
parts: info parts: info
.iter() .iter()
.map(|part| { .map(|part| s3_xml::PartItem {
// hide checksum if object is encrypted and the decryption
// keys are not provided
let checksum = part.checksum.filter(|_| encryption_res.is_ok());
s3_xml::PartItem {
etag: s3_xml::Value(format!("\"{}\"", part.etag)), etag: s3_xml::Value(format!("\"{}\"", part.etag)),
last_modified: s3_xml::Value(msec_to_rfc3339(part.timestamp)), last_modified: s3_xml::Value(msec_to_rfc3339(part.timestamp)),
part_number: s3_xml::IntValue(part.part_number as i64), part_number: s3_xml::IntValue(part.part_number as i64),
size: s3_xml::IntValue(part.size as i64), size: s3_xml::IntValue(part.size as i64),
checksum_crc32: match &checksum {
Some(ChecksumValue::Crc32(x)) => {
Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
}
_ => None,
},
checksum_crc32c: match &checksum {
Some(ChecksumValue::Crc32c(x)) => {
Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
}
_ => None,
},
checksum_sha1: match &checksum {
Some(ChecksumValue::Sha1(x)) => {
Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
}
_ => None,
},
checksum_sha256: match &checksum {
Some(ChecksumValue::Sha256(x)) => {
Some(s3_xml::Value(BASE64_STANDARD.encode(&x)))
}
_ => None,
},
}
}) })
.collect(), .collect(),
@ -385,7 +346,6 @@ struct PartInfo<'a> {
timestamp: u64, timestamp: u64,
part_number: u64, part_number: u64,
size: u64, size: u64,
checksum: Option<ChecksumValue>,
} }
enum ExtractionResult { enum ExtractionResult {
@ -399,7 +359,7 @@ enum ExtractionResult {
key: String, key: String,
}, },
// Fallback key is used for legacy APIs that only support // Fallback key is used for legacy APIs that only support
// exclusive pagination (and not inclusive one). // exlusive pagination (and not inclusive one).
SkipTo { SkipTo {
key: String, key: String,
fallback_key: Option<String>, fallback_key: Option<String>,
@ -409,7 +369,7 @@ enum ExtractionResult {
#[derive(PartialEq, Clone, Debug)] #[derive(PartialEq, Clone, Debug)]
enum RangeBegin { enum RangeBegin {
// Fallback key is used for legacy APIs that only support // Fallback key is used for legacy APIs that only support
// exclusive pagination (and not inclusive one). // exlusive pagination (and not inclusive one).
IncludingKey { IncludingKey {
key: String, key: String,
fallback_key: Option<String>, fallback_key: Option<String>,
@ -526,7 +486,6 @@ fn fetch_part_info<'a>(
timestamp: pk.timestamp, timestamp: pk.timestamp,
etag, etag,
size, size,
checksum: p.checksum,
}; };
match parts.last_mut() { match parts.last_mut() {
Some(lastpart) if lastpart.part_number == pk.part_number => { Some(lastpart) if lastpart.part_number == pk.part_number => {
@ -985,14 +944,11 @@ mod tests {
timestamp: TS, timestamp: TS,
state: ObjectVersionState::Uploading { state: ObjectVersionState::Uploading {
multipart: true, multipart: true,
encryption: ObjectVersionEncryption::Plaintext { headers: ObjectVersionHeaders {
inner: ObjectVersionMetaInner { content_type: "text/plain".to_string(),
headers: vec![], other: BTreeMap::<String, String>::new(),
checksum: None,
}, },
}, },
checksum_algorithm: None,
},
} }
} }
@ -1180,7 +1136,6 @@ mod tests {
version: uuid, version: uuid,
size: Some(3), size: Some(3),
etag: Some("etag1".into()), etag: Some("etag1".into()),
checksum: None,
}, },
), ),
( (
@ -1192,7 +1147,6 @@ mod tests {
version: uuid, version: uuid,
size: None, size: None,
etag: None, etag: None,
checksum: None,
}, },
), ),
( (
@ -1204,7 +1158,6 @@ mod tests {
version: uuid, version: uuid,
size: Some(10), size: Some(10),
etag: Some("etag2".into()), etag: Some("etag2".into()),
checksum: None,
}, },
), ),
( (
@ -1216,7 +1169,6 @@ mod tests {
version: uuid, version: uuid,
size: Some(7), size: Some(7),
etag: Some("etag3".into()), etag: Some("etag3".into()),
checksum: None,
}, },
), ),
( (
@ -1228,7 +1180,6 @@ mod tests {
version: uuid, version: uuid,
size: Some(5), size: Some(5),
etag: Some("etag4".into()), etag: Some("etag4".into()),
checksum: None,
}, },
), ),
]; ];
@ -1267,14 +1218,12 @@ mod tests {
etag: "etag1", etag: "etag1",
timestamp: TS, timestamp: TS,
part_number: 1, part_number: 1,
size: 3, size: 3
checksum: None,
}, },
PartInfo { PartInfo {
etag: "etag2", etag: "etag2",
timestamp: TS, timestamp: TS,
part_number: 3, part_number: 3,
checksum: None,
size: 10 size: 10
}, },
] ]
@ -1290,14 +1239,12 @@ mod tests {
PartInfo { PartInfo {
etag: "etag3", etag: "etag3",
timestamp: TS, timestamp: TS,
checksum: None,
part_number: 5, part_number: 5,
size: 7 size: 7
}, },
PartInfo { PartInfo {
etag: "etag4", etag: "etag4",
timestamp: TS, timestamp: TS,
checksum: None,
part_number: 8, part_number: 8,
size: 5 size: 5
}, },
@ -1321,28 +1268,24 @@ mod tests {
PartInfo { PartInfo {
etag: "etag1", etag: "etag1",
timestamp: TS, timestamp: TS,
checksum: None,
part_number: 1, part_number: 1,
size: 3 size: 3
}, },
PartInfo { PartInfo {
etag: "etag2", etag: "etag2",
timestamp: TS, timestamp: TS,
checksum: None,
part_number: 3, part_number: 3,
size: 10 size: 10
}, },
PartInfo { PartInfo {
etag: "etag3", etag: "etag3",
timestamp: TS, timestamp: TS,
checksum: None,
part_number: 5, part_number: 5,
size: 7 size: 7
}, },
PartInfo { PartInfo {
etag: "etag4", etag: "etag4",
timestamp: TS, timestamp: TS,
checksum: None,
part_number: 8, part_number: 8,
size: 5 size: 5
}, },

View File

@ -1,6 +1,3 @@
#[macro_use]
extern crate tracing;
pub mod api_server; pub mod api_server;
pub mod error; pub mod error;
@ -16,7 +13,5 @@ mod post_object;
mod put; mod put;
mod website; mod website;
mod checksum;
mod encryption;
mod router; mod router;
pub mod xml; pub mod xml;

View File

@ -1,10 +1,9 @@
use std::collections::HashMap; use std::collections::HashMap;
use std::convert::TryInto;
use std::sync::Arc; use std::sync::Arc;
use base64::prelude::*;
use futures::prelude::*; use futures::prelude::*;
use hyper::{Request, Response}; use hyper::{Request, Response};
use md5::{Digest as Md5Digest, Md5};
use garage_table::*; use garage_table::*;
use garage_util::data::*; use garage_util::data::*;
@ -15,15 +14,12 @@ use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*; use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*; use garage_model::s3::version_table::*;
use garage_api_common::helpers::*; use crate::helpers::*;
use garage_api_common::signature::verify_signed_content; use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;
use crate::api_server::{ReqBody, ResBody}; use crate::s3::put::*;
use crate::checksum::*; use crate::s3::xml as s3_xml;
use crate::encryption::EncryptionParams; use crate::signature::verify_signed_content;
use crate::error::*;
use crate::put::*;
use crate::xml as s3_xml;
// ---- // ----
@ -44,16 +40,6 @@ pub async fn handle_create_multipart_upload(
let timestamp = next_timestamp(existing_object.as_ref()); let timestamp = next_timestamp(existing_object.as_ref());
let headers = get_headers(req.headers())?; let headers = get_headers(req.headers())?;
let meta = ObjectVersionMetaInner {
headers,
checksum: None,
};
// Determine whether object should be encrypted, and if so the key
let encryption = EncryptionParams::new_from_headers(&garage, req.headers())?;
let object_encryption = encryption.encrypt_meta(meta)?;
let checksum_algorithm = request_checksum_algorithm(req.headers())?;
// Create object in object table // Create object in object table
let object_version = ObjectVersion { let object_version = ObjectVersion {
@ -61,8 +47,7 @@ pub async fn handle_create_multipart_upload(
timestamp, timestamp,
state: ObjectVersionState::Uploading { state: ObjectVersionState::Uploading {
multipart: true, multipart: true,
encryption: object_encryption, headers,
checksum_algorithm,
}, },
}; };
let object = Object::new(*bucket_id, key.to_string(), vec![object_version]); let object = Object::new(*bucket_id, key.to_string(), vec![object_version]);
@ -83,9 +68,7 @@ pub async fn handle_create_multipart_upload(
}; };
let xml = s3_xml::to_xml_with_header(&result)?; let xml = s3_xml::to_xml_with_header(&result)?;
let mut resp = Response::builder(); Ok(Response::new(string_body(xml)))
encryption.add_response_headers(&mut resp);
Ok(resp.body(string_body(xml))?)
} }
pub async fn handle_put_part( pub async fn handle_put_part(
@ -100,37 +83,20 @@ pub async fn handle_put_part(
let upload_id = decode_upload_id(upload_id)?; let upload_id = decode_upload_id(upload_id)?;
let expected_checksums = ExpectedChecksums { let content_md5 = match req.headers().get("content-md5") {
md5: match req.headers().get("content-md5") {
Some(x) => Some(x.to_str()?.to_string()), Some(x) => Some(x.to_str()?.to_string()),
None => None, None => None,
},
sha256: content_sha256,
extra: request_checksum_value(req.headers())?,
}; };
// Read first chunk, and at the same time try to get object to see if it exists // Read first chunk, and at the same time try to get object to see if it exists
let key = key.to_string(); let key = key.to_string();
let (req_head, req_body) = req.into_parts(); let stream = body_stream(req.into_body());
let stream = body_stream(req_body);
let mut chunker = StreamChunker::new(stream, garage.config.block_size); let mut chunker = StreamChunker::new(stream, garage.config.block_size);
let ((_, object_version, mut mpu), first_block) = let ((_, _, mut mpu), first_block) =
futures::try_join!(get_upload(&ctx, &key, &upload_id), chunker.next(),)?; futures::try_join!(get_upload(&ctx, &key, &upload_id), chunker.next(),)?;
// Check encryption params
let (object_encryption, checksum_algorithm) = match object_version.state {
ObjectVersionState::Uploading {
encryption,
checksum_algorithm,
..
} => (encryption, checksum_algorithm),
_ => unreachable!(),
};
let (encryption, _) =
EncryptionParams::check_decrypt(&garage, &req_head.headers, &object_encryption)?;
// Check object is valid and part can be accepted // Check object is valid and part can be accepted
let first_block = first_block.ok_or_bad_request("Empty body")?; let first_block = first_block.ok_or_bad_request("Empty body")?;
@ -156,9 +122,7 @@ pub async fn handle_put_part(
mpu_part_key, mpu_part_key,
MpuPart { MpuPart {
version: version_uuid, version: version_uuid,
// all these are filled in later, at the end of this function
etag: None, etag: None,
checksum: None,
size: None, size: None,
}, },
); );
@ -172,31 +136,24 @@ pub async fn handle_put_part(
garage.version_table.insert(&version).await?; garage.version_table.insert(&version).await?;
// Copy data to version // Copy data to version
let checksummer = let (total_size, data_md5sum, data_sha256sum, _) =
Checksummer::init(&expected_checksums, !encryption.is_encrypted()).add(checksum_algorithm); read_and_put_blocks(&ctx, &version, part_number, first_block, &mut chunker).await?;
let (total_size, checksums, _) = read_and_put_blocks(
&ctx,
&version,
encryption,
part_number,
first_block,
&mut chunker,
checksummer,
)
.await?;
// Verify that checksums map // Verify that checksums map
checksums.verify(&expected_checksums)?; ensure_checksum_matches(
data_md5sum.as_slice(),
data_sha256sum,
content_md5.as_deref(),
content_sha256,
)?;
// Store part etag in version // Store part etag in version
let etag = encryption.etag_from_md5(&checksums.md5); let data_md5sum_hex = hex::encode(data_md5sum);
mpu.parts.put( mpu.parts.put(
mpu_part_key, mpu_part_key,
MpuPart { MpuPart {
version: version_uuid, version: version_uuid,
etag: Some(etag.clone()), etag: Some(data_md5sum_hex.clone()),
checksum: checksums.extract(checksum_algorithm),
size: Some(total_size), size: Some(total_size),
}, },
); );
@ -206,10 +163,11 @@ pub async fn handle_put_part(
// We won't have to clean up on drop. // We won't have to clean up on drop.
interrupted_cleanup.cancel(); interrupted_cleanup.cancel();
let mut resp = Response::builder().header("ETag", format!("\"{}\"", etag)); let response = Response::builder()
encryption.add_response_headers(&mut resp); .header("ETag", format!("\"{}\"", data_md5sum_hex))
let resp = add_checksum_response_headers(&expected_checksums.extra, resp); .body(empty_body())
Ok(resp.body(empty_body())?) .unwrap();
Ok(response)
} }
struct InterruptedCleanup(Option<InterruptedCleanupInner>); struct InterruptedCleanup(Option<InterruptedCleanupInner>);
@ -256,11 +214,10 @@ pub async fn handle_complete_multipart_upload(
bucket_name, bucket_name,
.. ..
} = &ctx; } = &ctx;
let (req_head, req_body) = req.into_parts();
let expected_checksum = request_checksum_value(&req_head.headers)?; let body = http_body_util::BodyExt::collect(req.into_body())
.await?
let body = http_body_util::BodyExt::collect(req_body).await?.to_bytes(); .to_bytes();
if let Some(content_sha256) = content_sha256 { if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?; verify_signed_content(content_sha256, &body[..])?;
@ -284,12 +241,8 @@ pub async fn handle_complete_multipart_upload(
return Err(Error::bad_request("No data was uploaded")); return Err(Error::bad_request("No data was uploaded"));
} }
let (object_encryption, checksum_algorithm) = match object_version.state { let headers = match object_version.state {
ObjectVersionState::Uploading { ObjectVersionState::Uploading { headers, .. } => headers,
encryption,
checksum_algorithm,
..
} => (encryption, checksum_algorithm),
_ => unreachable!(), _ => unreachable!(),
}; };
@ -317,13 +270,6 @@ pub async fn handle_complete_multipart_upload(
for req_part in body_list_of_parts.iter() { for req_part in body_list_of_parts.iter() {
match have_parts.get(&req_part.part_number) { match have_parts.get(&req_part.part_number) {
Some(part) if part.etag.as_ref() == Some(&req_part.etag) && part.size.is_some() => { Some(part) if part.etag.as_ref() == Some(&req_part.etag) && part.size.is_some() => {
// alternative version: if req_part.checksum.is_some() && part.checksum != req_part.checksum {
if part.checksum != req_part.checksum {
return Err(Error::InvalidDigest(format!(
"Invalid checksum for part {}: in request = {:?}, uploaded part = {:?}",
req_part.part_number, req_part.checksum, part.checksum
)));
}
parts.push(*part) parts.push(*part)
} }
_ => return Err(Error::InvalidPart), _ => return Err(Error::InvalidPart),
@ -371,23 +317,18 @@ pub async fn handle_complete_multipart_upload(
}); });
garage.block_ref_table.insert_many(block_refs).await?; garage.block_ref_table.insert_many(block_refs).await?;
// Calculate checksum and etag of final object // Calculate etag of final object
// To understand how etags are calculated, read more here: // To understand how etags are calculated, read more here:
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
// https://teppen.io/2018/06/23/aws_s3_etags/ // https://teppen.io/2018/06/23/aws_s3_etags/
let mut checksummer = MultipartChecksummer::init(checksum_algorithm); let mut etag_md5_hasher = Md5::new();
for part in parts.iter() { for part in parts.iter() {
checksummer.update(part.etag.as_ref().unwrap(), part.checksum)?; etag_md5_hasher.update(part.etag.as_ref().unwrap().as_bytes());
} }
let (checksum_md5, checksum_extra) = checksummer.finalize(); let etag = format!(
"{}-{}",
if expected_checksum.is_some() && checksum_extra != expected_checksum { hex::encode(etag_md5_hasher.finalize()),
return Err(Error::InvalidDigest( parts.len()
"Failed to validate x-amz-checksum-*".into(), );
));
}
let etag = format!("{}-{}", hex::encode(&checksum_md5[..]), parts.len());
// Calculate total size of final object // Calculate total size of final object
let total_size = parts.iter().map(|x| x.size.unwrap()).sum(); let total_size = parts.iter().map(|x| x.size.unwrap()).sum();
@ -400,24 +341,10 @@ pub async fn handle_complete_multipart_upload(
return Err(e); return Err(e);
} }
// If there is a checksum algorithm, update metadata with checksum
let object_encryption = match checksum_algorithm {
None => object_encryption,
Some(_) => {
let (encryption, meta) =
EncryptionParams::check_decrypt(&garage, &req_head.headers, &object_encryption)?;
let new_meta = ObjectVersionMetaInner {
headers: meta.into_owned().headers,
checksum: checksum_extra,
};
encryption.encrypt_meta(new_meta)?
}
};
// Write final object version // Write final object version
object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock( object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
ObjectVersionMeta { ObjectVersionMeta {
encryption: object_encryption, headers,
size: total_size, size: total_size,
etag: etag.clone(), etag: etag.clone(),
}, },
@ -434,28 +361,10 @@ pub async fn handle_complete_multipart_upload(
bucket: s3_xml::Value(bucket_name.to_string()), bucket: s3_xml::Value(bucket_name.to_string()),
key: s3_xml::Value(key), key: s3_xml::Value(key),
etag: s3_xml::Value(format!("\"{}\"", etag)), etag: s3_xml::Value(format!("\"{}\"", etag)),
checksum_crc32: match &checksum_extra {
Some(ChecksumValue::Crc32(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))),
_ => None,
},
checksum_crc32c: match &checksum_extra {
Some(ChecksumValue::Crc32c(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))),
_ => None,
},
checksum_sha1: match &checksum_extra {
Some(ChecksumValue::Sha1(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))),
_ => None,
},
checksum_sha256: match &checksum_extra {
Some(ChecksumValue::Sha256(x)) => Some(s3_xml::Value(BASE64_STANDARD.encode(&x))),
_ => None,
},
}; };
let xml = s3_xml::to_xml_with_header(&result)?; let xml = s3_xml::to_xml_with_header(&result)?;
let resp = Response::builder(); Ok(Response::new(string_body(xml)))
let resp = add_checksum_response_headers(&expected_checksum, resp);
Ok(resp.body(string_body(xml))?)
} }
pub async fn handle_abort_multipart_upload( pub async fn handle_abort_multipart_upload(
@ -524,7 +433,6 @@ pub fn decode_upload_id(id: &str) -> Result<Uuid, Error> {
struct CompleteMultipartUploadPart { struct CompleteMultipartUploadPart {
etag: String, etag: String,
part_number: u64, part_number: u64,
checksum: Option<ChecksumValue>,
} }
fn parse_complete_multipart_upload_body( fn parse_complete_multipart_upload_body(
@ -550,41 +458,9 @@ fn parse_complete_multipart_upload_body(
.children() .children()
.find(|e| e.has_tag_name("PartNumber"))? .find(|e| e.has_tag_name("PartNumber"))?
.text()?; .text()?;
let checksum = if let Some(crc32) =
item.children().find(|e| e.has_tag_name("ChecksumCRC32"))
{
Some(ChecksumValue::Crc32(
BASE64_STANDARD.decode(crc32.text()?).ok()?[..]
.try_into()
.ok()?,
))
} else if let Some(crc32c) = item.children().find(|e| e.has_tag_name("ChecksumCRC32C"))
{
Some(ChecksumValue::Crc32c(
BASE64_STANDARD.decode(crc32c.text()?).ok()?[..]
.try_into()
.ok()?,
))
} else if let Some(sha1) = item.children().find(|e| e.has_tag_name("ChecksumSHA1")) {
Some(ChecksumValue::Sha1(
BASE64_STANDARD.decode(sha1.text()?).ok()?[..]
.try_into()
.ok()?,
))
} else if let Some(sha256) = item.children().find(|e| e.has_tag_name("ChecksumSHA256"))
{
Some(ChecksumValue::Sha256(
BASE64_STANDARD.decode(sha256.text()?).ok()?[..]
.try_into()
.ok()?,
))
} else {
None
};
parts.push(CompleteMultipartUploadPart { parts.push(CompleteMultipartUploadPart {
etag: etag.trim_matches('"').to_string(), etag: etag.trim_matches('"').to_string(),
part_number: part_number.parse().ok()?, part_number: part_number.parse().ok()?,
checksum,
}); });
} else { } else {
return None; return None;

View File

@ -14,18 +14,14 @@ use multer::{Constraints, Multipart, SizeLimit};
use serde::Deserialize; use serde::Deserialize;
use garage_model::garage::Garage; use garage_model::garage::Garage;
use garage_model::s3::object_table::*;
use garage_api_common::cors::*; use crate::helpers::*;
use garage_api_common::helpers::*; use crate::s3::api_server::ResBody;
use garage_api_common::signature::payload::{verify_v4, Authorization}; use crate::s3::cors::*;
use crate::s3::error::*;
use crate::api_server::ResBody; use crate::s3::put::{get_headers, save_stream};
use crate::checksum::*; use crate::s3::xml as s3_xml;
use crate::encryption::EncryptionParams; use crate::signature::payload::{verify_v4, Authorization};
use crate::error::*;
use crate::put::{get_headers, save_stream, ChecksumMode};
use crate::xml as s3_xml;
pub async fn handle_post_object( pub async fn handle_post_object(
garage: Arc<Garage>, garage: Arc<Garage>,
@ -52,17 +48,13 @@ pub async fn handle_post_object(
let mut multipart = Multipart::with_constraints(stream, boundary, constraints); let mut multipart = Multipart::with_constraints(stream, boundary, constraints);
let mut params = HeaderMap::new(); let mut params = HeaderMap::new();
let file_field = loop { let field = loop {
let field = if let Some(field) = multipart.next_field().await? { let field = if let Some(field) = multipart.next_field().await? {
field field
} else { } else {
return Err(Error::bad_request("Request did not contain a file")); return Err(Error::bad_request("Request did not contain a file"));
}; };
let name: HeaderName = if let Some(Ok(name)) = field let name: HeaderName = if let Some(Ok(name)) = field.name().map(TryInto::try_into) {
.name()
.map(str::to_ascii_lowercase)
.map(TryInto::try_into)
{
name name
} else { } else {
continue; continue;
@ -72,6 +64,14 @@ pub async fn handle_post_object(
} }
if let Ok(content) = HeaderValue::from_str(&field.text().await?) { if let Ok(content) = HeaderValue::from_str(&field.text().await?) {
match name.as_str() {
"tag" => (/* tag need to be reencoded, but we don't support them yet anyway */),
"acl" => {
if params.insert("x-amz-acl", content).is_some() {
return Err(Error::bad_request("Field 'acl' provided more than once"));
}
}
_ => {
if params.insert(&name, content).is_some() { if params.insert(&name, content).is_some() {
return Err(Error::bad_request(format!( return Err(Error::bad_request(format!(
"Field '{}' provided more than once", "Field '{}' provided more than once",
@ -79,6 +79,8 @@ pub async fn handle_post_object(
))); )));
} }
} }
}
}
}; };
// Current part is file. Do some checks before handling to PutObject code // Current part is file. Do some checks before handling to PutObject code
@ -94,7 +96,7 @@ pub async fn handle_post_object(
let key = if key.contains("${filename}") { let key = if key.contains("${filename}") {
// if no filename is provided, don't replace. This matches the behavior of AWS. // if no filename is provided, don't replace. This matches the behavior of AWS.
if let Some(filename) = file_field.file_name() { if let Some(filename) = field.file_name() {
key.replace("${filename}", filename) key.replace("${filename}", filename)
} else { } else {
key.to_owned() key.to_owned()
@ -108,8 +110,7 @@ pub async fn handle_post_object(
let bucket_id = garage let bucket_id = garage
.bucket_helper() .bucket_helper()
.resolve_bucket(&bucket_name, &api_key) .resolve_bucket(&bucket_name, &api_key)
.await .await?;
.map_err(pass_helper_error)?;
if !api_key.allow_write(&bucket_id) { if !api_key.allow_write(&bucket_id) {
return Err(Error::forbidden("Operation is not allowed for this key.")); return Err(Error::forbidden("Operation is not allowed for this key."));
@ -142,8 +143,9 @@ pub async fn handle_post_object(
let mut conditions = decoded_policy.into_conditions()?; let mut conditions = decoded_policy.into_conditions()?;
for (param_key, value) in params.iter() { for (param_key, value) in params.iter() {
let param_key = param_key.as_str(); let mut param_key = param_key.to_string();
match param_key { param_key.make_ascii_lowercase();
match param_key.as_str() {
"policy" | "x-amz-signature" => (), // this is always accepted, as it's required to validate other fields "policy" | "x-amz-signature" => (), // this is always accepted, as it's required to validate other fields
"content-type" => { "content-type" => {
let conds = conditions.params.remove("content-type").ok_or_else(|| { let conds = conditions.params.remove("content-type").ok_or_else(|| {
@ -188,7 +190,7 @@ pub async fn handle_post_object(
// how aws seems to behave. // how aws seems to behave.
continue; continue;
} }
let conds = conditions.params.remove(param_key).ok_or_else(|| { let conds = conditions.params.remove(&param_key).ok_or_else(|| {
Error::bad_request(format!("Key '{}' is not allowed in policy", param_key)) Error::bad_request(format!("Key '{}' is not allowed in policy", param_key))
})?; })?;
for cond in conds { for cond in conds {
@ -214,28 +216,10 @@ pub async fn handle_post_object(
))); )));
} }
// if we ever start supporting ACLs, we likely want to map "acl" to "x-amz-acl" somewhere
// around here to make sure the rest of the machinery takes our acl into account.
let headers = get_headers(&params)?; let headers = get_headers(&params)?;
let expected_checksums = ExpectedChecksums { let stream = field.map(|r| r.map_err(Into::into));
md5: params
.get("content-md5")
.map(HeaderValue::to_str)
.transpose()?
.map(str::to_string),
sha256: None,
extra: request_checksum_algorithm_value(&params)?,
};
let meta = ObjectVersionMetaInner {
headers,
checksum: expected_checksums.extra,
};
let encryption = EncryptionParams::new_from_headers(&garage, &params)?;
let stream = file_field.map(|r| r.map_err(Into::into));
let ctx = ReqCtx { let ctx = ReqCtx {
garage, garage,
bucket_id, bucket_id,
@ -244,17 +228,17 @@ pub async fn handle_post_object(
api_key, api_key,
}; };
let res = save_stream( let (_, md5) = save_stream(
&ctx, &ctx,
meta, headers,
encryption,
StreamLimiter::new(stream, conditions.content_length), StreamLimiter::new(stream, conditions.content_length),
&key, &key,
ChecksumMode::Verify(&expected_checksums), None,
None,
) )
.await?; .await?;
let etag = format!("\"{}\"", res.etag); let etag = format!("\"{}\"", md5);
let mut resp = if let Some(mut target) = params let mut resp = if let Some(mut target) = params
.get("success_action_redirect") .get("success_action_redirect")
@ -268,12 +252,11 @@ pub async fn handle_post_object(
.append_pair("key", &key) .append_pair("key", &key)
.append_pair("etag", &etag); .append_pair("etag", &etag);
let target = target.to_string(); let target = target.to_string();
let mut resp = Response::builder() Response::builder()
.status(StatusCode::SEE_OTHER) .status(StatusCode::SEE_OTHER)
.header(header::LOCATION, target.clone()) .header(header::LOCATION, target.clone())
.header(header::ETAG, etag); .header(header::ETAG, etag)
encryption.add_response_headers(&mut resp); .body(string_body(target))?
resp.body(string_body(target))?
} else { } else {
let path = head let path = head
.uri .uri
@ -300,10 +283,9 @@ pub async fn handle_post_object(
.get("success_action_status") .get("success_action_status")
.and_then(|h| h.to_str().ok()) .and_then(|h| h.to_str().ok())
.unwrap_or("204"); .unwrap_or("204");
let mut builder = Response::builder() let builder = Response::builder()
.header(header::LOCATION, location.clone()) .header(header::LOCATION, location.clone())
.header(header::ETAG, etag.clone()); .header(header::ETAG, etag.clone());
encryption.add_response_headers(&mut builder);
match action { match action {
"200" => builder.status(StatusCode::OK).body(empty_body())?, "200" => builder.status(StatusCode::OK).body(empty_body())?,
"201" => { "201" => {

View File

@ -1,9 +1,12 @@
use std::collections::HashMap; use std::collections::{BTreeMap, HashMap};
use std::sync::Arc; use std::sync::Arc;
use base64::prelude::*;
use futures::prelude::*; use futures::prelude::*;
use futures::stream::FuturesOrdered; use futures::stream::FuturesOrdered;
use futures::try_join; use futures::try_join;
use md5::{digest::generic_array::*, Digest as Md5Digest, Md5};
use sha2::Sha256;
use tokio::sync::mpsc; use tokio::sync::mpsc;
@ -19,6 +22,7 @@ use opentelemetry::{
use garage_net::bytes_buf::BytesBuf; use garage_net::bytes_buf::BytesBuf;
use garage_rpc::rpc_helper::OrderTag; use garage_rpc::rpc_helper::OrderTag;
use garage_table::*; use garage_table::*;
use garage_util::async_hash::*;
use garage_util::data::*; use garage_util::data::*;
use garage_util::error::Error as GarageError; use garage_util::error::Error as GarageError;
use garage_util::time::*; use garage_util::time::*;
@ -30,27 +34,12 @@ use garage_model::s3::block_ref_table::*;
use garage_model::s3::object_table::*; use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*; use garage_model::s3::version_table::*;
use garage_api_common::helpers::*; use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::api_server::{ReqBody, ResBody}; use crate::s3::error::*;
use crate::checksum::*;
use crate::encryption::EncryptionParams;
use crate::error::*;
const PUT_BLOCKS_MAX_PARALLEL: usize = 3; const PUT_BLOCKS_MAX_PARALLEL: usize = 3;
pub(crate) struct SaveStreamResult {
pub(crate) version_uuid: Uuid,
pub(crate) version_timestamp: u64,
/// Etag WITHOUT THE QUOTES (just the hex value)
pub(crate) etag: String,
}
pub(crate) enum ChecksumMode<'a> {
Verify(&'a ExpectedChecksums),
Calculate(Option<ChecksumAlgorithm>),
}
pub async fn handle_put( pub async fn handle_put(
ctx: ReqCtx, ctx: ReqCtx,
req: Request<ReqBody>, req: Request<ReqBody>,
@ -61,51 +50,26 @@ pub async fn handle_put(
let headers = get_headers(req.headers())?; let headers = get_headers(req.headers())?;
debug!("Object headers: {:?}", headers); debug!("Object headers: {:?}", headers);
let expected_checksums = ExpectedChecksums { let content_md5 = match req.headers().get("content-md5") {
md5: match req.headers().get("content-md5") {
Some(x) => Some(x.to_str()?.to_string()), Some(x) => Some(x.to_str()?.to_string()),
None => None, None => None,
},
sha256: content_sha256,
extra: request_checksum_value(req.headers())?,
}; };
let meta = ObjectVersionMetaInner {
headers,
checksum: expected_checksums.extra,
};
// Determine whether object should be encrypted, and if so the key
let encryption = EncryptionParams::new_from_headers(&ctx.garage, req.headers())?;
let stream = body_stream(req.into_body()); let stream = body_stream(req.into_body());
let res = save_stream( save_stream(&ctx, headers, stream, key, content_md5, content_sha256)
&ctx, .await
meta, .map(|(uuid, md5)| put_response(uuid, md5))
encryption,
stream,
key,
ChecksumMode::Verify(&expected_checksums),
)
.await?;
let mut resp = Response::builder()
.header("x-amz-version-id", hex::encode(res.version_uuid))
.header("ETag", format!("\"{}\"", res.etag));
encryption.add_response_headers(&mut resp);
let resp = add_checksum_response_headers(&expected_checksums.extra, resp);
Ok(resp.body(empty_body())?)
} }
pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>( pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
ctx: &ReqCtx, ctx: &ReqCtx,
mut meta: ObjectVersionMetaInner, headers: ObjectVersionHeaders,
encryption: EncryptionParams,
body: S, body: S,
key: &String, key: &String,
checksum_mode: ChecksumMode<'_>, content_md5: Option<String>,
) -> Result<SaveStreamResult, Error> { content_sha256: Option<FixedBytes32>,
) -> Result<(Uuid, String), Error> {
let ReqCtx { let ReqCtx {
garage, bucket_id, .. garage, bucket_id, ..
} = ctx; } = ctx;
@ -122,55 +86,43 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
let version_uuid = gen_uuid(); let version_uuid = gen_uuid();
let version_timestamp = next_timestamp(existing_object.as_ref()); let version_timestamp = next_timestamp(existing_object.as_ref());
let mut checksummer = match checksum_mode {
ChecksumMode::Verify(expected) => Checksummer::init(expected, !encryption.is_encrypted()),
ChecksumMode::Calculate(algo) => {
Checksummer::init(&Default::default(), !encryption.is_encrypted()).add(algo)
}
};
// If body is small enough, store it directly in the object table // If body is small enough, store it directly in the object table
// as "inline data". We can then return immediately. // as "inline data". We can then return immediately.
if first_block.len() < INLINE_THRESHOLD { if first_block.len() < INLINE_THRESHOLD {
checksummer.update(&first_block); let mut md5sum = Md5::new();
let checksums = checksummer.finalize(); md5sum.update(&first_block[..]);
let data_md5sum = md5sum.finalize();
match checksum_mode { let data_md5sum_hex = hex::encode(data_md5sum);
ChecksumMode::Verify(expected) => {
checksums.verify(&expected)?;
}
ChecksumMode::Calculate(algo) => {
meta.checksum = checksums.extract(algo);
}
};
let data_sha256sum = sha256sum(&first_block[..]);
let size = first_block.len() as u64; let size = first_block.len() as u64;
check_quotas(ctx, size, existing_object.as_ref()).await?;
let etag = encryption.etag_from_md5(&checksums.md5); ensure_checksum_matches(
let inline_data = encryption.encrypt_blob(&first_block)?.to_vec(); data_md5sum.as_slice(),
data_sha256sum,
content_md5.as_deref(),
content_sha256,
)?;
check_quotas(ctx, size, existing_object.as_ref()).await?;
let object_version = ObjectVersion { let object_version = ObjectVersion {
uuid: version_uuid, uuid: version_uuid,
timestamp: version_timestamp, timestamp: version_timestamp,
state: ObjectVersionState::Complete(ObjectVersionData::Inline( state: ObjectVersionState::Complete(ObjectVersionData::Inline(
ObjectVersionMeta { ObjectVersionMeta {
encryption: encryption.encrypt_meta(meta)?, headers,
size, size,
etag: etag.clone(), etag: data_md5sum_hex.clone(),
}, },
inline_data, first_block.to_vec(),
)), )),
}; };
let object = Object::new(*bucket_id, key.into(), vec![object_version]); let object = Object::new(*bucket_id, key.into(), vec![object_version]);
garage.object_table.insert(&object).await?; garage.object_table.insert(&object).await?;
return Ok(SaveStreamResult { return Ok((version_uuid, data_md5sum_hex));
version_uuid,
version_timestamp,
etag,
});
} }
// The following consists of many steps that can each fail. // The following consists of many steps that can each fail.
@ -190,8 +142,7 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
uuid: version_uuid, uuid: version_uuid,
timestamp: version_timestamp, timestamp: version_timestamp,
state: ObjectVersionState::Uploading { state: ObjectVersionState::Uploading {
encryption: encryption.encrypt_meta(meta.clone())?, headers: headers.clone(),
checksum_algorithm: None, // don't care; overwritten later
multipart: false, multipart: false,
}, },
}; };
@ -212,39 +163,26 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
); );
garage.version_table.insert(&version).await?; garage.version_table.insert(&version).await?;
// Transfer data // Transfer data and verify checksum
let (total_size, checksums, first_block_hash) = read_and_put_blocks( let (total_size, data_md5sum, data_sha256sum, first_block_hash) =
ctx, read_and_put_blocks(ctx, &version, 1, first_block, &mut chunker).await?;
&version,
encryption,
1,
first_block,
&mut chunker,
checksummer,
)
.await?;
// Verify checksums are ok / add calculated checksum to metadata ensure_checksum_matches(
match checksum_mode { data_md5sum.as_slice(),
ChecksumMode::Verify(expected) => { data_sha256sum,
checksums.verify(&expected)?; content_md5.as_deref(),
} content_sha256,
ChecksumMode::Calculate(algo) => { )?;
meta.checksum = checksums.extract(algo);
}
};
// Verify quotas are respected
check_quotas(ctx, total_size, existing_object.as_ref()).await?; check_quotas(ctx, total_size, existing_object.as_ref()).await?;
// Save final object state, marked as Complete // Save final object state, marked as Complete
let etag = encryption.etag_from_md5(&checksums.md5); let md5sum_hex = hex::encode(data_md5sum);
object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock( object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
ObjectVersionMeta { ObjectVersionMeta {
encryption: encryption.encrypt_meta(meta)?, headers,
size: total_size, size: total_size,
etag: etag.clone(), etag: md5sum_hex.clone(),
}, },
first_block_hash, first_block_hash,
)); ));
@ -255,11 +193,34 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
// We won't have to clean up on drop. // We won't have to clean up on drop.
interrupted_cleanup.cancel(); interrupted_cleanup.cancel();
Ok(SaveStreamResult { Ok((version_uuid, md5sum_hex))
version_uuid, }
version_timestamp,
etag, /// Validate MD5 sum against content-md5 header
}) /// and sha256sum against signed content-sha256
pub(crate) fn ensure_checksum_matches(
data_md5sum: &[u8],
data_sha256sum: garage_util::data::FixedBytes32,
content_md5: Option<&str>,
content_sha256: Option<garage_util::data::FixedBytes32>,
) -> Result<(), Error> {
if let Some(expected_sha256) = content_sha256 {
if expected_sha256 != data_sha256sum {
return Err(Error::bad_request(
"Unable to validate x-amz-content-sha256",
));
} else {
trace!("Successfully validated x-amz-content-sha256");
}
}
if let Some(expected_md5) = content_md5 {
if expected_md5.trim_matches('"') != BASE64_STANDARD.encode(data_md5sum) {
return Err(Error::bad_request("Unable to validate content-md5"));
} else {
trace!("Successfully validated content-md5");
}
}
Ok(())
} }
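
On the 0.9.x side, ensure_checksum_matches encodes the computed MD5 digest as base64 before comparing it to the Content-MD5 header (quotes trimmed), while the SHA-256 is compared separately as raw bytes against the signed x-amz-content-sha256. A minimal sketch of the MD5 half, assuming the md-5 and base64 crates imported elsewhere in this diff:

    use base64::prelude::*;
    use md5::{Digest, Md5};

    // Sketch: Content-MD5 carries the base64-encoded MD5 digest of the body,
    // sometimes wrapped in double quotes by clients.
    fn content_md5_matches(body: &[u8], content_md5: &str) -> bool {
        let digest = Md5::digest(body);
        content_md5.trim_matches('"') == BASE64_STANDARD.encode(digest.as_slice())
    }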
/// Check that inserting this object with this size doesn't exceed bucket quotas /// Check that inserting this object with this size doesn't exceed bucket quotas
@ -287,7 +248,7 @@ pub(crate) async fn check_quotas(
.await?; .await?;
let counters = counters let counters = counters
.map(|x| x.filtered_values(&garage.system.cluster_layout())) .map(|x| x.filtered_values(&garage.system.ring.borrow()))
.unwrap_or_default(); .unwrap_or_default();
let (prev_cnt_obj, prev_cnt_size) = match prev_object { let (prev_cnt_obj, prev_cnt_size) = match prev_object {
@ -329,12 +290,10 @@ pub(crate) async fn check_quotas(
pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> + Unpin>( pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
ctx: &ReqCtx, ctx: &ReqCtx,
version: &Version, version: &Version,
encryption: EncryptionParams,
part_number: u64, part_number: u64,
first_block: Bytes, first_block: Bytes,
chunker: &mut StreamChunker<S>, chunker: &mut StreamChunker<S>,
checksummer: Checksummer, ) -> Result<(u64, GenericArray<u8, typenum::U16>, Hash, Hash), Error> {
) -> Result<(u64, Checksums, Hash), Error> {
let tracer = opentelemetry::global::tracer("garage"); let tracer = opentelemetry::global::tracer("garage");
let (block_tx, mut block_rx) = mpsc::channel::<Result<Bytes, Error>>(2); let (block_tx, mut block_rx) = mpsc::channel::<Result<Bytes, Error>>(2);
@ -362,20 +321,20 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
let (block_tx2, mut block_rx2) = mpsc::channel::<Result<Bytes, Error>>(1); let (block_tx2, mut block_rx2) = mpsc::channel::<Result<Bytes, Error>>(1);
let hash_stream = async { let hash_stream = async {
let mut checksummer = checksummer; let md5hasher = AsyncHasher::<Md5>::new();
let sha256hasher = AsyncHasher::<Sha256>::new();
while let Some(next) = block_rx.recv().await { while let Some(next) = block_rx.recv().await {
match next { match next {
Ok(block) => { Ok(block) => {
block_tx2.send(Ok(block.clone())).await?; block_tx2.send(Ok(block.clone())).await?;
checksummer = tokio::task::spawn_blocking(move || { futures::future::join(
checksummer.update(&block); md5hasher.update(block.clone()),
checksummer sha256hasher.update(block.clone()),
}) )
.with_context(Context::current_with_span( .with_context(Context::current_with_span(
tracer.start("Hash block (md5, sha256)"), tracer.start("Hash block (md5, sha256)"),
)) ))
.await .await;
.unwrap()
} }
Err(e) => { Err(e) => {
block_tx2.send(Err(e)).await?; block_tx2.send(Err(e)).await?;
@ -384,38 +343,27 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
} }
} }
drop(block_tx2); drop(block_tx2);
Ok::<_, mpsc::error::SendError<_>>(checksummer) Ok::<_, mpsc::error::SendError<_>>(futures::join!(
md5hasher.finalize(),
sha256hasher.finalize()
))
}; };
let (block_tx3, mut block_rx3) = mpsc::channel::<Result<(Bytes, u64, Hash), Error>>(1); let (block_tx3, mut block_rx3) = mpsc::channel::<Result<(Bytes, Hash), Error>>(1);
let encrypt_hash_blocks = async { let hash_blocks = async {
let mut first_block_hash = None; let mut first_block_hash = None;
while let Some(next) = block_rx2.recv().await { while let Some(next) = block_rx2.recv().await {
match next { match next {
Ok(block) => { Ok(block) => {
let unencrypted_len = block.len() as u64; let hash = async_blake2sum(block.clone())
let res = tokio::task::spawn_blocking(move || {
let block = encryption.encrypt_block(block)?;
let hash = blake2sum(&block);
Ok((block, hash))
})
.with_context(Context::current_with_span( .with_context(Context::current_with_span(
tracer.start("Encrypt and hash (blake2) block"), tracer.start("Hash block (blake2)"),
)) ))
.await .await;
.unwrap();
match res {
Ok((block, hash)) => {
if first_block_hash.is_none() { if first_block_hash.is_none() {
first_block_hash = Some(hash); first_block_hash = Some(hash);
} }
block_tx3.send(Ok((block, unencrypted_len, hash))).await?; block_tx3.send(Ok((block, hash))).await?;
}
Err(e) => {
block_tx3.send(Err(e)).await?;
break;
}
}
} }
Err(e) => { Err(e) => {
block_tx3.send(Err(e)).await?; block_tx3.send(Err(e)).await?;
@ -450,7 +398,7 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
block_rx3.recv().await block_rx3.recv().await
} }
}; };
let (block, unencrypted_len, hash) = tokio::select! { let (block, hash) = tokio::select! {
result = write_futs_next => { result = write_futs_next => {
result?; result?;
continue; continue;
@ -462,18 +410,17 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
}; };
// For next block to be written: count its size and spawn future to write it // For next block to be written: count its size and spawn future to write it
let offset = written_bytes;
written_bytes += block.len() as u64;
write_futs.push_back(put_block_and_meta( write_futs.push_back(put_block_and_meta(
ctx, ctx,
version, version,
part_number, part_number,
written_bytes, offset,
hash, hash,
block, block,
unencrypted_len,
encryption.is_encrypted(),
order_stream.order(written_bytes), order_stream.order(written_bytes),
)); ));
written_bytes += unencrypted_len;
} }
while let Some(res) = write_futs.next().await { while let Some(res) = write_futs.next().await {
res?; res?;
@ -482,15 +429,17 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
}; };
let (_, stream_hash_result, block_hash_result, final_result) = let (_, stream_hash_result, block_hash_result, final_result) =
futures::join!(read_blocks, hash_stream, encrypt_hash_blocks, put_blocks); futures::join!(read_blocks, hash_stream, hash_blocks, put_blocks);
let total_size = final_result?; let total_size = final_result?;
// unwrap here is ok, because if hasher failed, it is because something failed // unwrap here is ok, because if hasher failed, it is because something failed
// later in the pipeline which already caused a return at the ? on previous line // later in the pipeline which already caused a return at the ? on previous line
let (data_md5sum, data_sha256sum) = stream_hash_result.unwrap();
let first_block_hash = block_hash_result.unwrap(); let first_block_hash = block_hash_result.unwrap();
let checksums = stream_hash_result.unwrap().finalize();
Ok((total_size, checksums, first_block_hash)) let data_sha256sum = Hash::try_from(&data_sha256sum[..]).unwrap();
Ok((total_size, data_md5sum, data_sha256sum, first_block_hash))
} }
async fn put_block_and_meta( async fn put_block_and_meta(
@ -500,8 +449,6 @@ async fn put_block_and_meta(
offset: u64, offset: u64,
hash: Hash, hash: Hash,
block: Bytes, block: Bytes,
size: u64,
is_encrypted: bool,
order_tag: OrderTag, order_tag: OrderTag,
) -> Result<(), GarageError> { ) -> Result<(), GarageError> {
let ReqCtx { garage, .. } = ctx; let ReqCtx { garage, .. } = ctx;
@ -512,7 +459,10 @@ async fn put_block_and_meta(
part_number, part_number,
offset, offset,
}, },
VersionBlock { hash, size }, VersionBlock {
hash,
size: block.len() as u64,
},
); );
let block_ref = BlockRef { let block_ref = BlockRef {
@ -524,7 +474,7 @@ async fn put_block_and_meta(
futures::try_join!( futures::try_join!(
garage garage
.block_manager .block_manager
.rpc_put_block(hash, block, is_encrypted, Some(order_tag)), .rpc_put_block(hash, block, Some(order_tag)),
garage.version_table.insert(&version), garage.version_table.insert(&version),
garage.block_ref_table.insert(&block_ref), garage.block_ref_table.insert(&block_ref),
)?; )?;
@ -567,6 +517,14 @@ impl<S: Stream<Item = Result<Bytes, Error>> + Unpin> StreamChunker<S> {
} }
} }
pub fn put_response(version_uuid: Uuid, md5sum_hex: String) -> Response<ResBody> {
Response::builder()
.header("x-amz-version-id", hex::encode(version_uuid))
.header("ETag", format!("\"{}\"", md5sum_hex))
.body(empty_body())
.unwrap()
}
struct InterruptedCleanup(Option<InterruptedCleanupInner>); struct InterruptedCleanup(Option<InterruptedCleanupInner>);
struct InterruptedCleanupInner { struct InterruptedCleanupInner {
garage: Arc<Garage>, garage: Arc<Garage>,
@ -601,35 +559,57 @@ impl Drop for InterruptedCleanup {
// ============ helpers ============ // ============ helpers ============
pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<HeaderList, Error> { pub(crate) fn get_mime_type(headers: &HeaderMap<HeaderValue>) -> Result<String, Error> {
let mut ret = Vec::new(); Ok(headers
.get(hyper::header::CONTENT_TYPE)
.map(|x| x.to_str())
.unwrap_or(Ok("blob"))?
.to_string())
}
pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<ObjectVersionHeaders, Error> {
let content_type = get_mime_type(headers)?;
let mut other = BTreeMap::new();
// Preserve standard headers // Preserve standard headers
let standard_header = vec![ let standard_header = vec![
hyper::header::CONTENT_TYPE,
hyper::header::CACHE_CONTROL, hyper::header::CACHE_CONTROL,
hyper::header::CONTENT_DISPOSITION, hyper::header::CONTENT_DISPOSITION,
hyper::header::CONTENT_ENCODING, hyper::header::CONTENT_ENCODING,
hyper::header::CONTENT_LANGUAGE, hyper::header::CONTENT_LANGUAGE,
hyper::header::EXPIRES, hyper::header::EXPIRES,
]; ];
for name in standard_header.iter() { for h in standard_header.iter() {
if let Some(value) = headers.get(name) { if let Some(v) = headers.get(h) {
ret.push((name.to_string(), value.to_str()?.to_string())); match v.to_str() {
Ok(v_str) => {
other.insert(h.to_string(), v_str.to_string());
}
Err(e) => {
warn!("Discarding header {}, error in .to_str(): {}", h, e);
}
}
} }
} }
// Preserve x-amz-meta- headers // Preserve x-amz-meta- headers
for (name, value) in headers.iter() { for (k, v) in headers.iter() {
if name.as_str().starts_with("x-amz-meta-") { if k.as_str().starts_with("x-amz-meta-") {
ret.push(( match std::str::from_utf8(v.as_bytes()) {
name.as_str().to_ascii_lowercase(), Ok(v_str) => {
std::str::from_utf8(value.as_bytes())?.to_string(), other.insert(k.to_string(), v_str.to_string());
)); }
Err(e) => {
warn!("Discarding header {}, error in .to_str(): {}", k, e);
}
}
} }
} }
Ok(ret) Ok(ObjectVersionHeaders {
content_type,
other,
})
} }
pub(crate) fn next_timestamp(existing_object: Option<&Object>) -> u64 { pub(crate) fn next_timestamp(existing_object: Option<&Object>) -> u64 {
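
Both versions of get_headers keep the same set of headers: a fixed list of standard HTTP headers plus anything prefixed x-amz-meta-, discarding values that are not valid UTF-8; the main side additionally lowercases the user-metadata names. A condensed sketch of that selection logic, with the return type simplified to plain pairs (the real functions return a HeaderList on main and an ObjectVersionHeaders on 0.9.x):

    use hyper::header::{HeaderMap, HeaderValue, CACHE_CONTROL, CONTENT_DISPOSITION,
        CONTENT_ENCODING, CONTENT_LANGUAGE, CONTENT_TYPE, EXPIRES};

    // Simplified: collect (name, value) pairs the way get_headers does.
    fn preserved_headers(headers: &HeaderMap<HeaderValue>) -> Vec<(String, String)> {
        let mut ret = Vec::new();
        for name in [CONTENT_TYPE, CACHE_CONTROL, CONTENT_DISPOSITION,
                     CONTENT_ENCODING, CONTENT_LANGUAGE, EXPIRES] {
            if let Some(value) = headers.get(&name) {
                if let Ok(v) = value.to_str() {
                    ret.push((name.to_string(), v.to_string()));
                }
            }
        }
        for (name, value) in headers.iter() {
            if name.as_str().starts_with("x-amz-meta-") {
                if let Ok(v) = std::str::from_utf8(value.as_bytes()) {
                    // User metadata keys are normalized to lowercase on main.
                    ret.push((name.as_str().to_ascii_lowercase(), v.to_string()));
                }
            }
        }
        ret
    }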

View File

@ -3,10 +3,9 @@ use std::borrow::Cow;
use hyper::header::HeaderValue; use hyper::header::HeaderValue;
use hyper::{HeaderMap, Method, Request}; use hyper::{HeaderMap, Method, Request};
use garage_api_common::helpers::Authorization; use crate::helpers::Authorization;
use garage_api_common::router_macros::{generateQueryParameters, router_match}; use crate::router_macros::{generateQueryParameters, router_match};
use crate::s3::error::*;
use crate::error::*;
router_match! {@func router_match! {@func

View File

@ -4,16 +4,15 @@ use http_body_util::BodyExt;
use hyper::{Request, Response, StatusCode}; use hyper::{Request, Response, StatusCode};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;
use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
use crate::signature::verify_signed_content;
use garage_model::bucket_table::*; use garage_model::bucket_table::*;
use garage_util::data::*; use garage_util::data::*;
use garage_api_common::helpers::*;
use garage_api_common::signature::verify_signed_content;
use crate::api_server::{ReqBody, ResBody};
use crate::error::*;
use crate::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
pub async fn handle_get_website(ctx: ReqCtx) -> Result<Response<ResBody>, Error> { pub async fn handle_get_website(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
let ReqCtx { bucket_params, .. } = ctx; let ReqCtx { bucket_params, .. } = ctx;
if let Some(website) = bucket_params.website_config.get() { if let Some(website) = bucket_params.website_config.get() {
@ -277,7 +276,7 @@ impl Redirect {
return Err(Error::bad_request("Bad XML: invalid protocol")); return Err(Error::bad_request("Bad XML: invalid protocol"));
} }
} }
// TODO there are probably more invalid cases, but which ones? // TODO there are probably more invalide cases, but which ones?
Ok(()) Ok(())
} }
} }
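
The only functional line visible in this last hunk is the protocol check in the Redirect validation, which rejects anything other than http or https with a 400. A minimal sketch of that rule, with the enclosing types assumed:

    // Sketch of the Redirect protocol rule: only "http" and "https" (or no
    // protocol at all) are accepted; anything else is a bad-request error.
    fn check_protocol(protocol: Option<&str>) -> Result<(), &'static str> {
        match protocol {
            None | Some("http") | Some("https") => Ok(()),
            Some(_) => Err("Bad XML: invalid protocol"),
        }
    }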

View File

@ -1,7 +1,7 @@
use quick_xml::se::to_string; use quick_xml::se::to_string;
use serde::{Deserialize, Serialize, Serializer}; use serde::{Deserialize, Serialize, Serializer};
use crate::error::Error as ApiError; use crate::s3::error::Error as ApiError;
pub fn to_xml_with_header<T: Serialize>(x: &T) -> Result<String, ApiError> { pub fn to_xml_with_header<T: Serialize>(x: &T) -> Result<String, ApiError> {
let mut xml = r#"<?xml version="1.0" encoding="UTF-8"?>"#.to_string(); let mut xml = r#"<?xml version="1.0" encoding="UTF-8"?>"#.to_string();
@ -131,14 +131,6 @@ pub struct CompleteMultipartUploadResult {
pub key: Value, pub key: Value,
#[serde(rename = "ETag")] #[serde(rename = "ETag")]
pub etag: Value, pub etag: Value,
#[serde(rename = "ChecksumCRC32")]
pub checksum_crc32: Option<Value>,
#[serde(rename = "ChecksumCRC32C")]
pub checksum_crc32c: Option<Value>,
#[serde(rename = "ChecksumSHA1")]
pub checksum_sha1: Option<Value>,
#[serde(rename = "ChecksumSHA256")]
pub checksum_sha256: Option<Value>,
} }
#[derive(Debug, Serialize, PartialEq, Eq)] #[derive(Debug, Serialize, PartialEq, Eq)]
@ -205,14 +197,6 @@ pub struct PartItem {
pub part_number: IntValue, pub part_number: IntValue,
#[serde(rename = "Size")] #[serde(rename = "Size")]
pub size: IntValue, pub size: IntValue,
#[serde(rename = "ChecksumCRC32")]
pub checksum_crc32: Option<Value>,
#[serde(rename = "ChecksumCRC32C")]
pub checksum_crc32c: Option<Value>,
#[serde(rename = "ChecksumSHA1")]
pub checksum_sha1: Option<Value>,
#[serde(rename = "ChecksumSHA256")]
pub checksum_sha256: Option<Value>,
} }
#[derive(Debug, Serialize, PartialEq, Eq)] #[derive(Debug, Serialize, PartialEq, Eq)]
@ -516,10 +500,6 @@ mod tests {
bucket: Value("mybucket".to_string()), bucket: Value("mybucket".to_string()),
key: Value("a/plop".to_string()), key: Value("a/plop".to_string()),
etag: Value("\"3858f62230ac3c915f300c664312c11f-9\"".to_string()), etag: Value("\"3858f62230ac3c915f300c664312c11f-9\"".to_string()),
checksum_crc32: None,
checksum_crc32c: None,
checksum_sha1: Some(Value("ZJAnHyG8PeKz9tI8UTcHrJos39A=".into())),
checksum_sha256: None,
}; };
assert_eq!( assert_eq!(
to_xml_with_header(&result)?, to_xml_with_header(&result)?,
@ -529,7 +509,6 @@ mod tests {
<Bucket>mybucket</Bucket>\ <Bucket>mybucket</Bucket>\
<Key>a/plop</Key>\ <Key>a/plop</Key>\
<ETag>&quot;3858f62230ac3c915f300c664312c11f-9&quot;</ETag>\ <ETag>&quot;3858f62230ac3c915f300c664312c11f-9&quot;</ETag>\
<ChecksumSHA1>ZJAnHyG8PeKz9tI8UTcHrJos39A=</ChecksumSHA1>\
</CompleteMultipartUploadResult>" </CompleteMultipartUploadResult>"
); );
Ok(()) Ok(())
@ -801,22 +780,12 @@ mod tests {
last_modified: Value("2010-11-10T20:48:34.000Z".to_string()), last_modified: Value("2010-11-10T20:48:34.000Z".to_string()),
part_number: IntValue(2), part_number: IntValue(2),
size: IntValue(10485760), size: IntValue(10485760),
checksum_crc32: None,
checksum_crc32c: None,
checksum_sha256: Some(Value(
"5RQ3A5uk0w7ojNjvegohch4JRBBGN/cLhsNrPzfv/hA=".into(),
)),
checksum_sha1: None,
}, },
PartItem { PartItem {
etag: Value("\"aaaa18db4cc2f85cedef654fccc4a4x8\"".to_string()), etag: Value("\"aaaa18db4cc2f85cedef654fccc4a4x8\"".to_string()),
last_modified: Value("2010-11-10T20:48:33.000Z".to_string()), last_modified: Value("2010-11-10T20:48:33.000Z".to_string()),
part_number: IntValue(3), part_number: IntValue(3),
size: IntValue(10485760), size: IntValue(10485760),
checksum_sha256: None,
checksum_crc32c: None,
checksum_crc32: Some(Value("ZJAnHyG8=".into())),
checksum_sha1: None,
}, },
], ],
initiator: Initiator { initiator: Initiator {
@ -851,14 +820,12 @@ mod tests {
<LastModified>2010-11-10T20:48:34.000Z</LastModified>\ <LastModified>2010-11-10T20:48:34.000Z</LastModified>\
<PartNumber>2</PartNumber>\ <PartNumber>2</PartNumber>\
<Size>10485760</Size>\ <Size>10485760</Size>\
<ChecksumSHA256>5RQ3A5uk0w7ojNjvegohch4JRBBGN/cLhsNrPzfv/hA=</ChecksumSHA256>\
</Part>\ </Part>\
<Part>\ <Part>\
<ETag>&quot;aaaa18db4cc2f85cedef654fccc4a4x8&quot;</ETag>\ <ETag>&quot;aaaa18db4cc2f85cedef654fccc4a4x8&quot;</ETag>\
<LastModified>2010-11-10T20:48:33.000Z</LastModified>\ <LastModified>2010-11-10T20:48:33.000Z</LastModified>\
<PartNumber>3</PartNumber>\ <PartNumber>3</PartNumber>\
<Size>10485760</Size>\ <Size>10485760</Size>\
<ChecksumCRC32>ZJAnHyG8=</ChecksumCRC32>\
</Part>\ </Part>\
<Initiator>\ <Initiator>\
<DisplayName>umat-user-11116a31-17b5-4fb7-9df5-b288870f11xx</DisplayName>\ <DisplayName>umat-user-11116a31-17b5-4fb7-9df5-b288870f11xx</DisplayName>\
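
The Checksum* fields that differ between the two sides are Option<Value> on main, and the main-side test strings show that fields left at None never appear in the serialized output. A small sketch of that behavior, assuming quick_xml's serde serializer skips None as the tests above rely on (the struct name and fields here are ours, for illustration):

    use quick_xml::se::to_string;
    use serde::Serialize;

    #[derive(Serialize)]
    #[serde(rename = "Part")]
    struct PartSketch {
        #[serde(rename = "PartNumber")]
        part_number: i64,
        // A None here is omitted from the XML, so 0.9.x-style output simply
        // lacks the checksum element rather than emitting an empty one.
        #[serde(rename = "ChecksumSHA1")]
        checksum_sha1: Option<String>,
    }

    // to_string(&PartSketch { part_number: 3, checksum_sha1: None })
    // yields "<Part><PartNumber>3</PartNumber></Part>".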

View File

@ -47,8 +47,8 @@ pub async fn check_payload_signature(
let query = parse_query_map(request.uri())?; let query = parse_query_map(request.uri())?;
if query.contains_key(&X_AMZ_ALGORITHM) { if query.contains_key(&X_AMZ_ALGORITHM) {
// We check for presigned-URL-style authentication first, because // We check for presigned-URL-style authentification first, because
// the browser or something else could inject an Authorization header // the browser or someting else could inject an Authorization header
// that is totally unrelated to AWS signatures. // that is totally unrelated to AWS signatures.
check_presigned_signature(garage, service, request, query).await check_presigned_signature(garage, service, request, query).await
} else if request.headers().contains_key(AUTHORIZATION) { } else if request.headers().contains_key(AUTHORIZATION) {
@ -132,7 +132,7 @@ async fn check_presigned_signature(
let authorization = Authorization::parse_presigned(&algorithm.value, &query)?; let authorization = Authorization::parse_presigned(&algorithm.value, &query)?;
// Verify that all necessary request headers are included in signed_headers // Verify that all necessary request headers are included in signed_headers
// For AWSv4 pre-signed URLs, the following must be included: // For AWSv4 pre-signed URLs, the following must be incldued:
// - the Host header (mandatory) // - the Host header (mandatory)
// - all x-amz-* headers used in the request // - all x-amz-* headers used in the request
let signed_headers = split_signed_headers(&authorization)?; let signed_headers = split_signed_headers(&authorization)?;
@ -306,7 +306,7 @@ pub fn canonical_request(
// Note that there is also the issue of path normalization, which I hope is unrelated to the // Note that there is also the issue of path normalization, which I hope is unrelated to the
// one of URI-encoding. At least in aws-sigv4 both parameters can be set independently, // one of URI-encoding. At least in aws-sigv4 both parameters can be set independently,
// and rusoto_signature does not seem to do any effective path normalization, even though // and rusoto_signature does not seem to do any effective path normalization, even though
// it mentions it in the comments (same link to the source code as above). // it mentions it in the comments (same link to the souce code as above).
// We make the explicit choice of NOT normalizing paths in the K2V API because doing so // We make the explicit choice of NOT normalizing paths in the K2V API because doing so
// would make non-normalized paths invalid K2V partition keys, and we don't want that. // would make non-normalized paths invalid K2V partition keys, and we don't want that.
let canonical_uri: std::borrow::Cow<str> = if service != "s3" { let canonical_uri: std::borrow::Cow<str> = if service != "s3" {
@ -518,7 +518,7 @@ impl Authorization {
}) })
} }
pub fn parse_form(params: &HeaderMap) -> Result<Self, Error> { pub(crate) fn parse_form(params: &HeaderMap) -> Result<Self, Error> {
let algorithm = params let algorithm = params
.get(X_AMZ_ALGORITHM) .get(X_AMZ_ALGORITHM)
.ok_or_bad_request("Missing X-Amz-Algorithm header")? .ok_or_bad_request("Missing X-Amz-Algorithm header")?

View File

@ -1,6 +1,6 @@
[package] [package]
name = "garage_block" name = "garage_block"
version = "1.0.1" version = "0.9.4"
authors = ["Alex Auvolat <alex@adnab.me>"] authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018" edition = "2018"
license = "AGPL-3.0" license = "AGPL-3.0"
@ -34,8 +34,10 @@ async-compression.workspace = true
zstd.workspace = true zstd.workspace = true
serde.workspace = true serde.workspace = true
serde_bytes.workspace = true
futures.workspace = true futures.workspace = true
futures-util.workspace = true
tokio.workspace = true tokio.workspace = true
tokio-util.workspace = true tokio-util.workspace = true

Some files were not shown because too many files have changed in this diff