From 2730aba14001aee6f73de25bcdeb43fc981eeb6a Mon Sep 17 00:00:00 2001 From: folbrich Date: Sat, 26 Dec 2020 18:36:31 -0700 Subject: [PATCH 1/9] AES encryption for chunks --- compress.go | 19 +++++ coverter.go => converter.go | 18 ----- encrypt.go | 60 +++++++++++++++ encrypt_test.go | 57 ++++++++++++++ gcs.go | 7 +- local.go | 6 +- local_test.go | 143 ++++++++++++++++++++++++++++++++++++ remotehttp.go | 6 +- s3.go | 7 +- sftp.go | 6 +- store.go | 32 ++++++-- 11 files changed, 331 insertions(+), 30 deletions(-) rename coverter.go => converter.go (85%) create mode 100644 encrypt.go create mode 100644 encrypt_test.go diff --git a/compress.go b/compress.go index a841710..dbefeed 100644 --- a/compress.go +++ b/compress.go @@ -21,3 +21,22 @@ func Compress(src []byte) ([]byte, error) { func Decompress(dst, src []byte) ([]byte, error) { return decoder.DecodeAll(src, dst) } + +// Compression layer converter. Compresses/decompresses chunk data +// to and from storage. Implements the converter interface. 
+type Compressor struct{} + +var _ converter = Compressor{} + +func (d Compressor) toStorage(in []byte) ([]byte, error) { + return Compress(in) +} + +func (d Compressor) fromStorage(in []byte) ([]byte, error) { + return Decompress(nil, in) +} + +func (d Compressor) equal(c converter) bool { + _, ok := c.(Compressor) + return ok +} diff --git a/coverter.go b/converter.go similarity index 85% rename from coverter.go rename to converter.go index a4692ea..1b2d609 100644 --- a/coverter.go +++ b/converter.go @@ -77,21 +77,3 @@ type converter interface { equal(converter) bool } - -// Compression layer -type Compressor struct{} - -var _ converter = Compressor{} - -func (d Compressor) toStorage(in []byte) ([]byte, error) { - return Compress(in) -} - -func (d Compressor) fromStorage(in []byte) ([]byte, error) { - return Decompress(nil, in) -} - -func (d Compressor) equal(c converter) bool { - _, ok := c.(Compressor) - return ok -} diff --git a/encrypt.go b/encrypt.go new file mode 100644 index 0000000..59fbc49 --- /dev/null +++ b/encrypt.go @@ -0,0 +1,60 @@ +package desync + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "crypto/sha256" + "errors" + "io" +) + +// aes256ctr is an encryption layer for chunk storage. It +// encrypts/decrypts to/from storage using aes-256-ctr. +// The key is generated from a passphrase with SHA256. +type aes256ctr struct { + key []byte + block cipher.Block +} + +var _ converter = aes256ctr{} + +func NewAES256CTR(passphrase string) (aes256ctr, error) { + key := sha256.Sum256([]byte(passphrase)) + block, err := aes.NewCipher(key[:]) + return aes256ctr{key: key[:], block: block}, err +} + +// encrypt for storage. The IV is prepended to the data. 
+func (d aes256ctr) toStorage(in []byte) ([]byte, error) { + out := make([]byte, aes.BlockSize+len(in)) + iv := out[:aes.BlockSize] + if _, err := io.ReadFull(rand.Reader, iv); err != nil { + return nil, err + } + stream := cipher.NewCTR(d.block, iv) + stream.XORKeyStream(out[aes.BlockSize:], in) + return out, nil +} + +// decrypt from storage. The IV is taken from the start of the +// chunk data. +func (d aes256ctr) fromStorage(in []byte) ([]byte, error) { + if len(in) < aes.BlockSize { + return nil, errors.New("no iv prefix found in chunk, not encrypted or wrong algorithm") + } + out := make([]byte, len(in)-aes.BlockSize) + iv := in[:aes.BlockSize] + stream := cipher.NewCTR(d.block, iv) + stream.XORKeyStream(out, in[aes.BlockSize:]) + return out, nil +} + +func (d aes256ctr) equal(c converter) bool { + other, ok := c.(aes256ctr) + if !ok { + return false + } + return bytes.Equal(d.key, other.key) +} diff --git a/encrypt_test.go b/encrypt_test.go new file mode 100644 index 0000000..39b6fae --- /dev/null +++ b/encrypt_test.go @@ -0,0 +1,57 @@ +package desync + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAES256CTREncryptDecrypt(t *testing.T) { + plainIn := []byte{1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20} + + // Make two converters. One for encryption and one for decryption. 
Could use + // just one but this way we confirm the key generation is consistent + enc, err := NewAES256CTR("secret-password") + require.NoError(t, err) + dec, err := NewAES256CTR("secret-password") + require.NoError(t, err) + + // Encrypt the data + ciphertext, err := enc.toStorage(plainIn) + require.NoError(t, err) + + // Confirm the ciphertext is actually different than what went in + require.NotEqual(t, plainIn, ciphertext) + + // Decrypt it + plainOut, err := dec.fromStorage(ciphertext) + require.NoError(t, err) + + // This should match the original data of course + require.Equal(t, plainIn, plainOut) + + // Make another instance with a different password + diffPw, err := NewAES256CTR("something-else") + require.NoError(t, err) + + // Try to decrypt the data, should end up with garbage + diffOut, err := diffPw.fromStorage(ciphertext) + require.NoError(t, err) + require.NotEqual(t, plainIn, diffOut) +} + +func TestAES256CTRCompare(t *testing.T) { + // Make three converters. Two with the same, one with a diff password + enc1, err := NewAES256CTR("secret-password") + require.NoError(t, err) + enc2, err := NewAES256CTR("secret-password") + require.NoError(t, err) + diffPw, err := NewAES256CTR("something-else") + require.NoError(t, err) + + // Check equality method + require.True(t, enc1.equal(enc2)) + require.True(t, enc2.equal(enc1)) + require.False(t, diffPw.equal(enc1)) + require.False(t, enc1.equal(diffPw)) +} diff --git a/gcs.go b/gcs.go index 93e8fab..7fc4168 100644 --- a/gcs.go +++ b/gcs.go @@ -53,9 +53,12 @@ func normalizeGCPrefix(path string) string { // NewGCStoreBase initializes a base object used for chunk or index stores // backed by Google Storage. 
func NewGCStoreBase(u *url.URL, opt StoreOptions) (GCStoreBase, error) { - var err error ctx := context.TODO() - s := GCStoreBase{Location: u.String(), opt: opt, converters: opt.converters()} + converters, err := opt.converters() + if err != nil { + return GCStoreBase{}, err + } + s := GCStoreBase{Location: u.String(), opt: opt, converters: converters} if u.Scheme != "gs" { return s, fmt.Errorf("invalid scheme '%s', expected 'gs'", u.Scheme) } diff --git a/local.go b/local.go index f38c69b..e84738f 100644 --- a/local.go +++ b/local.go @@ -42,7 +42,11 @@ func NewLocalStore(dir string, opt StoreOptions) (LocalStore, error) { if !info.IsDir() { return LocalStore{}, fmt.Errorf("%s is not a directory", dir) } - return LocalStore{Base: dir, opt: opt, converters: opt.converters()}, nil + converters, err := opt.converters() + if err != nil { + return LocalStore{}, err + } + return LocalStore{Base: dir, opt: opt, converters: converters}, nil } // GetChunk reads and returns one (compressed!) chunk from the store diff --git a/local_test.go b/local_test.go index 0f956f9..9b79302 100644 --- a/local_test.go +++ b/local_test.go @@ -147,3 +147,146 @@ func TestLocalStoreErrorHandling(t *testing.T) { t.Fatal(err) } } + +func TestLocalStoreUncompressedEncrypted(t *testing.T) { + store := t.TempDir() + + s, err := NewLocalStore(store, + StoreOptions{ + Uncompressed: true, + Encryption: true, + EncryptionPassword: "test-password", + }, + ) + require.NoError(t, err) + + // Make up some data and store it + dataIn := []byte("some data") + + chunkIn := NewChunk(dataIn) + id := chunkIn.ID() + + err = s.StoreChunk(chunkIn) + require.NoError(t, err) + + // Check it's in the store + hasChunk, err := s.HasChunk(id) + require.NoError(t, err) + require.True(t, hasChunk, "chunk not found in store") + + // Pull the data the "official" way + chunkOut, err := s.GetChunk(id) + require.NoError(t, err) + + dataOut, err := chunkOut.Data() + require.NoError(t, err) + + // Compare the data that went in 
with what came out + require.Equal(t, dataIn, dataOut) + + // Now let's look at the file in the store directly to make sure it's actually + // encrypted, meaning it should not match the plain (uncompressed) text + _, name := s.nameFromID(id) + b, err := ioutil.ReadFile(name) + require.NoError(t, err) + require.NotEqual(t, dataIn, b, "chunk is not encrypted") +} + +func TestLocalStoreCompressedEncrypted(t *testing.T) { + store := t.TempDir() + + s, err := NewLocalStore(store, + StoreOptions{ + Uncompressed: false, + Encryption: true, + EncryptionPassword: "test-password", + }, + ) + require.NoError(t, err) + + // Make up some data and store it + dataIn := []byte("some data") + + chunkIn := NewChunk(dataIn) + id := chunkIn.ID() + + err = s.StoreChunk(chunkIn) + require.NoError(t, err) + + // Check it's in the store + hasChunk, err := s.HasChunk(id) + require.NoError(t, err) + require.True(t, hasChunk, "chunk not found in store") + + // Pull the data the "official" way + chunkOut, err := s.GetChunk(id) + require.NoError(t, err) + + dataOut, err := chunkOut.Data() + require.NoError(t, err) + + // Compare the data that went in with what came out + require.Equal(t, dataIn, dataOut) + + // Now let's look at the file in the store directly and confirm it is + // compressed and encrypted (in that order!). 
+ _, name := s.nameFromID(id) + b, err := ioutil.ReadFile(name) + require.NoError(t, err) + + // First decrypt it, using the correct password + dec, _ := NewAES256CTR("test-password") + decrypted, err := dec.fromStorage(b) + require.NoError(t, err) + + // Now decompress + decompressed, err := Decompress(nil, decrypted) + require.NoError(t, err) + + // And it should match the original content + require.Equal(t, dataIn, decompressed) +} + +func TestLocalStorePasswordMismatch(t *testing.T) { + store := t.TempDir() + + // Build 2 stores accessing the same files but with different passwords + s1, err := NewLocalStore(store, + StoreOptions{ + Encryption: true, + EncryptionPassword: "good-password", + }, + ) + require.NoError(t, err) + s2, err := NewLocalStore(store, + StoreOptions{ + Encryption: true, + EncryptionPassword: "bad-password", + }, + ) + require.NoError(t, err) + + // Make up some data and store it using the good password + dataIn := []byte("some data") + + chunkIn := NewChunk(dataIn) + id := chunkIn.ID() + + err = s1.StoreChunk(chunkIn) + require.NoError(t, err) + + // Pull the data with the good password and compare it + chunkOut, err := s1.GetChunk(id) + require.NoError(t, err) + dataOut, err := chunkOut.Data() + require.NoError(t, err) + require.Equal(t, dataIn, dataOut) + + // Try to get the chunk with a bad password, expect a signature mismatch + _, err = s2.GetChunk(id) + require.Error(t, err) + + if _, ok := err.(ChunkInvalid); !ok { + t.Fatalf("expected ChunkInvalid error, but got %T", err) + } +} diff --git a/remotehttp.go b/remotehttp.go index 4c46b69..2acbb22 100644 --- a/remotehttp.go +++ b/remotehttp.go @@ -89,7 +89,11 @@ func NewRemoteHTTPStoreBase(location *url.URL, opt StoreOptions) (*RemoteHTTPBas } client := &http.Client{Transport: tr, Timeout: timeout} - return &RemoteHTTPBase{location: location, client: client, opt: opt, converters: opt.converters()}, nil + converters, err := opt.converters() + if err != nil { + return nil, err + } + 
return &RemoteHTTPBase{location: location, client: client, opt: opt, converters: converters}, nil } func (r *RemoteHTTPBase) String() string { diff --git a/s3.go b/s3.go index e816612..6dbd429 100644 --- a/s3.go +++ b/s3.go @@ -32,8 +32,11 @@ type S3Store struct { // NewS3StoreBase initializes a base object used for chunk or index stores backed by S3. func NewS3StoreBase(u *url.URL, s3Creds *credentials.Credentials, region string, opt StoreOptions, lookupType minio.BucketLookupType) (S3StoreBase, error) { - var err error - s := S3StoreBase{Location: u.String(), opt: opt, converters: opt.converters()} + converters, err := opt.converters() + if err != nil { + return S3StoreBase{}, err + } + s := S3StoreBase{Location: u.String(), opt: opt, converters: converters} if !strings.HasPrefix(u.Scheme, "s3+http") { return s, fmt.Errorf("invalid scheme '%s', expected 's3+http' or 's3+https'", u.Scheme) } diff --git a/sftp.go b/sftp.go index f27522c..d869a0a 100644 --- a/sftp.go +++ b/sftp.go @@ -143,7 +143,11 @@ func (s *SFTPStoreBase) nameFromID(id ChunkID) string { // NewSFTPStore initializes a chunk store using SFTP over SSH. func NewSFTPStore(location *url.URL, opt StoreOptions) (*SFTPStore, error) { - s := &SFTPStore{make(chan *SFTPStoreBase, opt.N), location, opt.N, opt.converters()} + converters, err := opt.converters() + if err != nil { + return nil, err + } + s := &SFTPStore{make(chan *SFTPStoreBase, opt.N), location, opt.N, converters} for i := 0; i < opt.N; i++ { c, err := newSFTPStoreBase(location, opt) if err != nil { diff --git a/store.go b/store.go index ac7c464..22058b8 100644 --- a/store.go +++ b/store.go @@ -2,6 +2,7 @@ package desync import ( "context" + "errors" "fmt" "io" "time" @@ -86,17 +87,38 @@ type StoreOptions struct { // Store and read chunks uncompressed, without chunk file extension Uncompressed bool `json:"uncompressed"` + + // Store encryption settings. The only algorithm currently supported is aes-256-ctr which + // is also the default. 
+ Encryption bool `json:"encryption"` + EncryptionAlgorithm string `json:"encryption-algorithm"` + EncryptionPassword string `json:"encryption-password"` } // Returns data converters that convert between plain and storage-format. Each layer // represents a modification such as compression or encryption and is applied in order // depending the direction of data. If data is written to storage, the layer's toStorage -// method is called in the order they are returned. If data is read, the fromStorage +// method is called in the order they are defined. If data is read, the fromStorage // method is called in reverse order. -func (o StoreOptions) converters() []converter { - var m []converter +func (o StoreOptions) converters() ([]converter, error) { + var c []converter if !o.Uncompressed { - m = append(m, Compressor{}) + c = append(c, Compressor{}) + } + if o.Encryption { + if o.EncryptionPassword == "" { + return nil, errors.New("no encryption password configured") + } + switch o.EncryptionAlgorithm { + case "", "aes-256-ctr": + enc, err := NewAES256CTR(o.EncryptionPassword) + if err != nil { + return nil, err + } + c = append(c, enc) + default: + return nil, fmt.Errorf("unsupported encryption algorithm %q", o.EncryptionAlgorithm) + } } - return m + return c, nil } From 02a253409877f2fa23e9f6cb147c96156706610e Mon Sep 17 00:00:00 2001 From: folbrich Date: Sun, 27 Dec 2020 09:48:17 -0700 Subject: [PATCH 2/9] Update docs --- README.md | 17 +++++++++++++++-- encrypt.go | 3 ++- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 6b4ff96..b26fc05 100644 --- a/README.md +++ b/README.md @@ -27,6 +27,7 @@ Among the distinguishing factors: - Built-in HTTP(S) index server to read/write indexes - Reflinking matching blocks (rather than copying) from seed files if supported by the filesystem (currently only Btrfs and XFS) - catar archives can be created from standard tar archives, and they can also be extracted to GNU tar format. 
+- Optional chunk store encryption with AES-256-CTR. ## Terminology @@ -233,6 +234,10 @@ If the client configures the HTTP chunk server to be uncompressed (`chunk-server Compressed and uncompressed chunks can live in the same store and don't interfere with each other. A store that's configured for compressed chunks by configuring it client-side will not see the uncompressed chunks that may be present. `prune` and `verify` too will ignore any chunks written in the other format. Both kinds of chunks can be accessed by multiple clients concurrently and independently. +### Chunk Encryption + +Chunks can be encrypted with a symmetric algorithm on a per-store basis. To use encryption, it has to be enabled in the [configuration](Configuration) file, and an algorithm needs to be specified. A single instance of desync can use multiple stores at the same time, each with a different (or the same) encryption mode and key. Encryption passwords can not be changed for a store as that would invalidate the existing chunks in the store. To change the key, create a new store, then either re-chunk the data, or use `desync cache -c -s ` to decrypt the chunks from the old store and re-encrypt with the new key in the new store. + ### Configuration For most use cases, it is sufficient to use the tool's default configuration not requiring a config file. Having a config file `$HOME/.config/desync/config.json` allows for further customization of timeouts, error retry behaviour or credentials that can't be set via command-line options or environment variables. All values have sensible defaults if unconfigured. Only add configuration for values that differ from the defaults. To view the current configuration, use `desync config`. If no config file is present, this will show the defaults. To create a config file allowing custom values, use `desync config -w` which will write the current configuration to the file, then edit the file. 
@@ -242,17 +247,20 @@ Available configuration values: - `http-timeout` *DEPRECATED, see `store-options..timeout`* - HTTP request timeout used in HTTP stores (not S3) in nanoseconds - `http-error-retry` *DEPRECATED, see `store-options..error-retry` - Number of times to retry failed chunk requests from HTTP stores - `s3-credentials` - Defines credentials for use with S3 stores. Especially useful if more than one S3 store is used. The key in the config needs to be the URL scheme and host used for the store, excluding the path, but including the port number if used in the store URL. It is also possible to use a [standard aws credentials file](https://docs.aws.amazon.com/cli/latest/userguide/cli-config-files.html) in order to store s3 credentials. -- `store-options` - Allows customization of chunk and index stores, for example comression settings, timeouts, retry behavior and keys. Not all options are applicable to every store, some of these like `timeout` are ignored for local stores. Some of these options, such as the client certificates are overwritten with any values set in the command line. Note that the store location used in the command line needs to match the key under `store-options` exactly for these options to be used. Watch out for trailing `/` in URLs. +- `store-options` - Allows customization of chunk and index stores, for example compression settings, timeouts, retry behavior and keys. Not all options are applicable to every store, some of these like `timeout` are ignored for local stores. Some of these options, such as the client certificates are overwritten with any values set in the command line. Note that the store location used in the command line needs to match the key under `store-options` exactly for these options to be used. Watch out for trailing `/` in URLs. - `timeout` - Time limit for chunk read or write operation in nanoseconds. Default: 1 minute. If set to a negative value, timeout is infinite. 
- `error-retry` - Number of times to retry failed chunk requests. Default: 0. - `error-retry-base-interval` - Number of nanoseconds to wait before first retry attempt. Retry attempt number N for the same request will wait N times this interval. Default: 0. - - `client-cert` - Cerificate file to be used for stores where the server requires mutual SSL. + - `client-cert` - Certificate file to be used for stores where the server requires mutual SSL. - `client-key` - Key file to be used for stores where the server requires mutual SSL. - `ca-cert` - Certificate file containing trusted certs or CAs. - `trust-insecure` - Trust any certificate presented by the server. - `skip-verify` - Disables data integrity verification when reading chunks to improve performance. Only recommended when chaining chunk stores with the `chunk-server` command using compressed stores. - `uncompressed` - Reads and writes uncompressed chunks from/to this store. This can improve performance, especially for local stores or caches. Compressed and uncompressed chunks can coexist in the same store, but only one kind is read or written by one client. - `http-auth` - Value of the Authorization header in HTTP requests. This could be a bearer token with `"Bearer "` or a Base64-encoded username and password pair for basic authentication like `"Basic dXNlcjpwYXNzd29yZAo="`. + - `encryption` - Must be set to `true` to encrypt chunks in the store. + - `encryption-password` - Encryption password to use for all chunks in the store. + - `encryption-algorithm` - Optional, symmetric encryption algorithm. Default `aes-256-ctr`. 
#### Example config @@ -287,6 +295,11 @@ Available configuration values: }, "/path/to/local/cache": { "uncompressed": true + }, + "/path/to/encrypted/store": { + "encryption": true, + "encryption-algorithm": "aes-256-ctr", + "encryption-password": "mystorepassword" } } } diff --git a/encrypt.go b/encrypt.go index 59fbc49..b839898 100644 --- a/encrypt.go +++ b/encrypt.go @@ -39,7 +39,8 @@ func (d aes256ctr) toStorage(in []byte) ([]byte, error) { } // decrypt from storage. The IV is taken from the start of the -// chunk data. +// chunk data. This by itself does not verify integrity. That +// is achieved by the existing chunk validation. func (d aes256ctr) fromStorage(in []byte) ([]byte, error) { if len(in) < aes.BlockSize { return nil, errors.New("no iv prefix found in chunk, not encrypted or wrong algorithm") From f7f7a161ed7fef3fcbddefc855e1fd483de62c1a Mon Sep 17 00:00:00 2001 From: folbrich Date: Sun, 27 Dec 2020 14:30:25 -0700 Subject: [PATCH 3/9] Add encryption options to chunk-server. Also fixes an old error handling bug --- cmd/desync/chunkserver.go | 20 ++++++++++++++++- cmd/desync/chunkserver_test.go | 40 ++++++++++++++++++++++++++++++++++ httphandler_test.go | 38 ++++++++++++++++++++++++++++++++ remotehttp.go | 3 +-- remotehttp_test.go | 40 ++++++++++++++++++++++++++++++++++ store.go | 6 ++--- 6 files changed, 141 insertions(+), 6 deletions(-) diff --git a/cmd/desync/chunkserver.go b/cmd/desync/chunkserver.go index 6791ad4..57a1651 100644 --- a/cmd/desync/chunkserver.go +++ b/cmd/desync/chunkserver.go @@ -24,6 +24,8 @@ type chunkServerOptions struct { skipVerifyWrite bool uncompressed bool logFile string + encryptionAlg string + encryptionPw string } func newChunkServerCommand(ctx context.Context) *cobra.Command { @@ -68,6 +70,8 @@ needing to restart the server. This can be done under load as well. 
flags.BoolVar(&opt.skipVerifyWrite, "skip-verify-write", true, "don't verify chunk data written to this server (faster)") flags.BoolVarP(&opt.uncompressed, "uncompressed", "u", false, "serve uncompressed chunks") flags.StringVar(&opt.logFile, "log", "", "request log file or - for STDOUT") + flags.StringVar(&opt.encryptionPw, "encryption-password", "", "serve chunks encrypted with this password") + flags.StringVar(&opt.encryptionAlg, "encryption-algorithm", "aes-256-ctr", "encryption algorithm") addStoreOptions(&opt.cmdStoreOptions, flags) addServerOptions(&opt.cmdServerOptions, flags) return cmd @@ -127,9 +131,23 @@ func runChunkServer(ctx context.Context, opt chunkServerOptions, args []string) } defer s.Close() + // Build the converters. In this case, the "storage" side is what is served + // up by the server towards the client. var converters desync.Converters if !opt.uncompressed { - converters = desync.Converters{desync.Compressor{}} + converters = append(converters, desync.Compressor{}) + } + if opt.encryptionPw != "" { + switch opt.encryptionAlg { + case "", "aes-256-ctr": + enc, err := desync.NewAES256CTR(opt.encryptionPw) + if err != nil { + return err + } + converters = append(converters, enc) + default: + return fmt.Errorf("unsupported encryption algorithm %q", opt.encryptionAlg) + } } handler := desync.NewHTTPHandler(s, opt.writable, opt.skipVerifyWrite, converters, opt.auth) diff --git a/cmd/desync/chunkserver_test.go b/cmd/desync/chunkserver_test.go index a984095..e494518 100644 --- a/cmd/desync/chunkserver_test.go +++ b/cmd/desync/chunkserver_test.go @@ -181,3 +181,43 @@ func startChunkServer(t *testing.T, args ...string) (string, context.CancelFunc) time.Sleep(time.Second) return addr, cancel } + +func TestChunkServerEncryption(t *testing.T) { + outdir := t.TempDir() + + // Start a (writable) server, it'll expect compressed+encrypted chunks over + // the wire while storing them only compressed in the local store + addr, cancel := startChunkServer(t, 
"-s", outdir, "-w", "--skip-verify-read=false", "--skip-verify-write=false", "--encryption-password", "testpassword") + defer cancel() + store := fmt.Sprintf("http://%s/", addr) + + // Build a client config. The client needs to be setup to talk to the HTTP chunk server + // compressed+encrypted. Create a temp JSON config for that HTTP store and load it. + cfgFile = filepath.Join(outdir, "config.json") + cfgFileContent := fmt.Sprintf(`{"store-options": {"%s":{"encryption": true, "encryption-password": "testpassword"}}}`, store) + require.NoError(t, ioutil.WriteFile(cfgFile, []byte(cfgFileContent), 0644)) + initConfig() + + // Run a "chop" command to send some chunks (encrypted) over HTTP, then have the server + // store them un-encrypted in its local store. + chopCmd := newChopCommand(context.Background()) + chopCmd.SetArgs([]string{"-s", store, "testdata/blob1.caibx", "testdata/blob1"}) + chopCmd.SetOutput(ioutil.Discard) + _, err := chopCmd.ExecuteC() + require.NoError(t, err) + + // Now read it all back over HTTP (again encrypted) and re-assemble the test file + extractFile := filepath.Join(outdir, "blob1") + extractCmd := newExtractCommand(context.Background()) + extractCmd.SetArgs([]string{"-s", store, "testdata/blob1.caibx", extractFile}) + extractCmd.SetOutput(ioutil.Discard) + _, err = extractCmd.ExecuteC() + require.NoError(t, err) + + // Not actually necessary, but for good measure let's compare the blobs + blobIn, err := ioutil.ReadFile("testdata/blob1") + require.NoError(t, err) + blobOut, err := ioutil.ReadFile(extractFile) + require.NoError(t, err) + require.Equal(t, blobIn, blobOut) +} diff --git a/httphandler_test.go b/httphandler_test.go index 86068cc..93e368c 100644 --- a/httphandler_test.go +++ b/httphandler_test.go @@ -103,3 +103,41 @@ func TestHTTPHandlerCompression(t *testing.T) { _, err = unStore.GetChunk(id) require.NoError(t, err) } + +func TestHTTPHandlerEncryption(t *testing.T) { + // Prep a local store (no encryption) + store := 
t.TempDir() + upstream, err := NewLocalStore(store, StoreOptions{}) + require.NoError(t, err) + + // Start a read-write capable server with Encryption, no Compression + enc, err := NewAES256CTR("testpassword") + require.NoError(t, err) + server := httptest.NewServer(NewHTTPHandler(upstream, true, false, []converter{enc}, "")) + defer server.Close() + + // Initialize HTTP chunks store (client) + httpStoreURL, _ := url.Parse(server.URL) + httpStore, err := NewRemoteHTTPStore(httpStoreURL, StoreOptions{ + Uncompressed: true, + Encryption: true, + EncryptionPassword: "testpassword", + }) + require.NoError(t, err) + + // Make up some data and store it in the RW store + dataIn := []byte("some data") + chunkIn := NewChunk(dataIn) + id := chunkIn.ID() + + // Write a chunk via HTTP + err = httpStore.StoreChunk(chunkIn) + require.NoError(t, err) + + // Read it back via HTTP and compare to the original + chunkOut, err := httpStore.GetChunk(id) + require.NoError(t, err) + dataOut, err := chunkOut.Data() + require.NoError(t, err) + require.Equal(t, dataIn, dataOut) +} diff --git a/remotehttp.go b/remotehttp.go index 2acbb22..6789c47 100644 --- a/remotehttp.go +++ b/remotehttp.go @@ -157,11 +157,10 @@ func (r *RemoteHTTPBase) IssueRetryableHttpRequest(method string, u *url.URL, ge retry: attempt++ statusCode, responseBody, err := r.IssueHttpRequest(method, u, getReader, attempt) - if (err != nil) || (statusCode >= 500 && statusCode < 600) { if attempt >= r.opt.ErrorRetry { log.WithField("attempt", attempt).Debug("failed, giving up") - return 0, nil, err + return statusCode, responseBody, err } else { log.WithField("attempt", attempt).WithField("delay", attempt).Debug("waiting, then retrying") time.Sleep(time.Duration(attempt) * r.opt.ErrorRetryBaseInterval) diff --git a/remotehttp_test.go b/remotehttp_test.go index 137d443..bdc9c6a 100644 --- a/remotehttp_test.go +++ b/remotehttp_test.go @@ -1,6 +1,7 @@ package desync import ( + "bytes" "io" "io/ioutil" "net/http" @@ -8,6 +9,8 
@@ import ( "net/url" "testing" "time" + + "github.com/stretchr/testify/require" ) func TestHTTPStoreURL(t *testing.T) { @@ -299,3 +302,40 @@ func TestPutChunk(t *testing.T) { }) } } + +func TestRemoteHTTPPutEncrypted(t *testing.T) { + body := new(bytes.Buffer) + + // Setup a dummy server that records the request body (raw chunk data) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + io.Copy(body, r.Body) + })) + defer ts.Close() + u, _ := url.Parse(ts.URL) + + // HTTP client store with encryption and compression + httpStore, err := NewRemoteHTTPStore(u, StoreOptions{ + Uncompressed: false, + Encryption: true, + EncryptionPassword: "testpassword", + }) + require.NoError(t, err) + + // Prep a test chunk + dataIn := []byte("some data") + chunkIn := NewChunk(dataIn) + + // Send the chunk over HTTP + err = httpStore.StoreChunk(chunkIn) + require.NoError(t, err) + + // If everything worked, the request body should be the chunk data, first + // compressed, then encrypted. Unwind it manually to check the layers are in order. + dec, err := NewAES256CTR("testpassword") + require.NoError(t, err) + decrypted, err := dec.fromStorage(body.Bytes()) + require.NoError(t, err) + uncompressed, err := Decompress(nil, decrypted) + require.NoError(t, err) + require.Equal(t, dataIn, uncompressed) +} diff --git a/store.go b/store.go index 22058b8..366892a 100644 --- a/store.go +++ b/store.go @@ -90,9 +90,9 @@ type StoreOptions struct { // Store encryption settings. The only algorithm currently supported is aes-256-ctr which // is also the default. 
- Encryption bool `json:"encryption"` - EncryptionAlgorithm string `json:"encryption-algorithm"` - EncryptionPassword string `json:"encryption-password"` + Encryption bool `json:"encryption,omitempty"` + EncryptionAlgorithm string `json:"encryption-algorithm,omitempty"` + EncryptionPassword string `json:"encryption-password,omitempty"` } // Returns data converters that convert between plain and storage-format. Each layer From 9fb3014ddc4739208dc926169f68524b4687083f Mon Sep 17 00:00:00 2001 From: folbrich Date: Wed, 30 Dec 2020 12:37:59 -0700 Subject: [PATCH 4/9] Calculate a KeyID and use it to build chunk file extensions --- README.md | 11 ++++++++++- compress.go | 4 ++++ const.go | 6 ------ converter.go | 17 +++++++++++++++++ encrypt.go | 12 +++++++++++- encrypt_test.go | 14 ++++++++++++++ gcs.go | 24 ++++++------------------ httphandler.go | 9 +++------ local.go | 38 +++++++++----------------------------- local_test.go | 7 ++++--- remotehttp.go | 7 +------ s3.go | 22 +++++----------------- sftp.go | 44 +++++++++++++++----------------------------- sftpindex.go | 2 +- 14 files changed, 100 insertions(+), 117 deletions(-) diff --git a/README.md b/README.md index b26fc05..762ed8a 100644 --- a/README.md +++ b/README.md @@ -236,7 +236,16 @@ Compressed and uncompressed chunks can live in the same store and don't interfer ### Chunk Encryption -Chunks can be encrypted with a symmetric algorithm on a per-store basis. To use encryption, it has to be enabled in the [configuration](Configuration) file, and an algorithm needs to be specified. A single instance of desync can use multiple stores at the same time, each with a different (or the same) encryption mode and key. Encryption passwords can not be changed for a store as that would invalidate the existing chunks in the store. To change the key, create a new store, then either re-chunk the data, or use `desync cache -c -s ` to decrypt the chunks from the old store and re-encrypt with the new key in the new store. 
+Chunks can be encrypted with a symmetric algorithm on a per-store basis. To use encryption, it has to be enabled in the [configuration](Configuration) file, and an algorithm needs to be specified. A single instance of desync can use multiple stores at the same time, each with a different (or the same) encryption mode and key. Encrypted chunks are stores with file extensions containing the algorithm and a key identifier. If the password for a store is changed, all existing chunks in it will become "invisible" since the extension would no longer match. To change the key, chunks have to be re-encrypted with the new key. That could happen into same, or better, a new store. Create a new store, then either re-chunk the data, or use `desync cache -c -s ` to decrypt the chunks from the old store and re-encrypt with the new key in the new store. + +Chunk extensions are chosen based on compression or encryption settings as follows: + +| Compressed | Encrypted | Extension | Example | +|:---:|:---:|:---:|:---:| +| no | no | n/a | `fbef/fbef1a00ceda67e2abc49f707fd70e315fab60eacd19c257e23897339280ce78` | +| yes | no | `.cacnk` | `ffbef/fbef1a00ceda67e2abc49f707fd70e315fab60eacd19c257e23897339280ce78.cacnk` | +| no | yes | `.-` | `fbef/fbef1a00ceda67e2abc49f707fd70e315fab60eacd19c257e23897339280ce78.aes-256-ctr-635af003` | +| yes | yes | `.cacnk.-` | `fbef/fbef1a00ceda67e2abc49f707fd70e315fab60eacd19c257e23897339280ce78.cacnk.aes-256-ctr-635af003` | ### Configuration diff --git a/compress.go b/compress.go index dbefeed..915882e 100644 --- a/compress.go +++ b/compress.go @@ -40,3 +40,7 @@ func (d Compressor) equal(c converter) bool { _, ok := c.(Compressor) return ok } + +func (d Compressor) storageExtension() string { + return ".cacnk" +} diff --git a/const.go b/const.go index d756443..7be7546 100644 --- a/const.go +++ b/const.go @@ -137,9 +137,3 @@ var ( CaFormatTableTailMarker: "CaFormatTableTailMarker", } ) - -// CompressedChunkExt is the file extension used for compressed 
chunks -const CompressedChunkExt = ".cacnk" - -// UncompressedChunkExt is the file extension of uncompressed chunks -const UncompressedChunkExt = "" diff --git a/converter.go b/converter.go index 1b2d609..9088d97 100644 --- a/converter.go +++ b/converter.go @@ -1,5 +1,7 @@ package desync +import "strings" + // Converters are modifiers for chunk data, such as compression or encryption. // They are used to prepare chunk data for storage, or to read it from storage. // The order of the conversion layers matters. When plain data is prepared for @@ -63,6 +65,16 @@ func (s Converters) equal(c Converters) bool { return true } +// Extension to be used in storage. Concatenation of converter +// extensions in order (towards storage). +func (s Converters) storageExtension() string { + var ext strings.Builder + for _, layer := range s { + ext.WriteString(layer.storageExtension()) + } + return ext.String() +} + // converter is a storage data modifier layer. type converter interface { // Convert data from it's original form to storage format. @@ -75,5 +87,10 @@ type converter interface { // the output may be used for the next conversion layer. fromStorage([]byte) ([]byte, error) + // Returns the file extension that should be used for a + // chunk when stored. Usually a concatenation of layers. + storageExtension() string + + // True is one converter matches another exactly. equal(converter) bool } diff --git a/encrypt.go b/encrypt.go index b839898..506cad6 100644 --- a/encrypt.go +++ b/encrypt.go @@ -7,6 +7,7 @@ import ( "crypto/rand" "crypto/sha256" "errors" + "fmt" "io" ) @@ -16,14 +17,19 @@ import ( type aes256ctr struct { key []byte block cipher.Block + + // Chunk extension with identifier derived from the key. 
+ extension string } var _ converter = aes256ctr{} func NewAES256CTR(passphrase string) (aes256ctr, error) { key := sha256.Sum256([]byte(passphrase)) + keyHash := sha256.Sum256(key[:]) + extension := fmt.Sprintf(".aes-256-ctr-%x", keyHash[:4]) block, err := aes.NewCipher(key[:]) - return aes256ctr{key: key[:], block: block}, err + return aes256ctr{key: key[:], block: block, extension: extension}, err } // encrypt for storage. The IV is prepended to the data. @@ -59,3 +65,7 @@ func (d aes256ctr) equal(c converter) bool { } return bytes.Equal(d.key, other.key) } + +func (d aes256ctr) storageExtension() string { + return d.extension +} diff --git a/encrypt_test.go b/encrypt_test.go index 39b6fae..3466788 100644 --- a/encrypt_test.go +++ b/encrypt_test.go @@ -55,3 +55,17 @@ func TestAES256CTRCompare(t *testing.T) { require.False(t, diffPw.equal(enc1)) require.False(t, enc1.equal(diffPw)) } + +func TestAES256CTRExtension(t *testing.T) { + enc1, err := NewAES256CTR("secret-password") + require.NoError(t, err) + + // Confirm that we have a key-handle in the file extension + require.Equal(t, ".aes-256-ctr-16db3403", enc1.extension) + + // If algorithm and password are the same, the same key + // handle (extension) should be produced every time + enc2, err := NewAES256CTR("secret-password") + require.NoError(t, err) + require.Equal(t, enc1.extension, enc2.extension) +} diff --git a/gcs.go b/gcs.go index 7fc4168..ab6275c 100644 --- a/gcs.go +++ b/gcs.go @@ -80,7 +80,7 @@ func (s GCStoreBase) String() string { return s.Location } -// Close the GCS base store. NOP opertation but needed to implement the store interface. +// Close the GCS base store. NOP operation but needed to implement the store interface. func (s GCStoreBase) Close() error { return nil } // NewGCStore creates a chunk store with Google Storage backing. 
The URL @@ -258,28 +258,16 @@ func (s GCStore) Prune(ctx context.Context, ids map[ChunkID]struct{}) error { func (s GCStore) nameFromID(id ChunkID) string { sID := id.String() - name := s.prefix + sID[0:4] + "/" + sID - if s.opt.Uncompressed { - name += UncompressedChunkExt - } else { - name += CompressedChunkExt - } + name := s.prefix + sID[0:4] + "/" + sID + s.converters.storageExtension() return name } func (s GCStore) idFromName(name string) (ChunkID, error) { - var n string - if s.opt.Uncompressed { - if !strings.HasSuffix(name, UncompressedChunkExt) { - return ChunkID{}, fmt.Errorf("object %s is not a chunk", name) - } - n = strings.TrimSuffix(strings.TrimPrefix(name, s.prefix), UncompressedChunkExt) - } else { - if !strings.HasSuffix(name, CompressedChunkExt) { - return ChunkID{}, fmt.Errorf("object %s is not a chunk", name) - } - n = strings.TrimSuffix(strings.TrimPrefix(name, s.prefix), CompressedChunkExt) + extension := s.converters.storageExtension() + if !strings.HasSuffix(name, extension) { + return ChunkID{}, fmt.Errorf("object %s is not a chunk", name) } + n := strings.TrimSuffix(strings.TrimPrefix(name, s.prefix), extension) fragments := strings.Split(n, "/") if len(fragments) != 2 { return ChunkID{}, fmt.Errorf("incorrect chunk name for object %s", name) diff --git a/httphandler.go b/httphandler.go index 06e9d50..4a20475 100644 --- a/httphandler.go +++ b/httphandler.go @@ -124,12 +124,9 @@ func (h HTTPHandler) put(id ChunkID, w http.ResponseWriter, r *http.Request) { } func (h HTTPHandler) idFromPath(p string) (ChunkID, error) { - ext := CompressedChunkExt - if !h.compressed { - if strings.HasSuffix(p, CompressedChunkExt) { - return ChunkID{}, errors.New("compressed chunk requested from http chunk store serving uncompressed chunks") - } - ext = UncompressedChunkExt + ext := h.converters.storageExtension() + if !strings.HasSuffix(p, ext) { + return ChunkID{}, errors.New("invalid chunk type, verify compression and encryption settings") } sID := 
strings.TrimSuffix(path.Base(p), ext) if len(sID) < 4 { diff --git a/local.go b/local.go index e84738f..3e71165 100644 --- a/local.go +++ b/local.go @@ -131,6 +131,7 @@ func (s LocalStore) Verify(ctx context.Context, n int, repair bool, w io.Writer) // Go trough all chunks underneath Base, filtering out other files, then feed // the IDs to the workers + extension := s.converters.storageExtension() err := filepath.Walk(s.Base, func(path string, info os.FileInfo, err error) error { // See if we're meant to stop select { @@ -145,18 +146,10 @@ func (s LocalStore) Verify(ctx context.Context, n int, repair bool, w io.Writer) return nil } // Skip compressed chunks if this is running in uncompressed mode and vice-versa - var sID string - if s.opt.Uncompressed { - if !strings.HasSuffix(path, UncompressedChunkExt) { - return nil - } - sID = strings.TrimSuffix(filepath.Base(path), UncompressedChunkExt) - } else { - if !strings.HasSuffix(path, CompressedChunkExt) { - return nil - } - sID = strings.TrimSuffix(filepath.Base(path), CompressedChunkExt) + if !strings.HasSuffix(filepath.Base(path), extension) { + return nil } + sID := strings.TrimSuffix(filepath.Base(path), extension) // Convert the name into a checksum, if that fails we're probably not looking // at a chunk file and should skip it. 
id, err := ChunkIDFromString(sID) @@ -175,6 +168,7 @@ func (s LocalStore) Verify(ctx context.Context, n int, repair bool, w io.Writer) // Prune removes any chunks from the store that are not contained in a list // of chunks func (s LocalStore) Prune(ctx context.Context, ids map[ChunkID]struct{}) error { + extension := s.converters.storageExtension() // Go trough all chunks underneath Base, filtering out other directories and files err := filepath.Walk(s.Base, func(path string, info os.FileInfo, err error) error { // See if we're meant to stop @@ -195,20 +189,11 @@ func (s LocalStore) Prune(ctx context.Context, ids map[ChunkID]struct{}) error { _ = os.Remove(path) return nil } - // Skip compressed chunks if this is running in uncompressed mode and vice-versa - var sID string - if s.opt.Uncompressed { - if !strings.HasSuffix(path, UncompressedChunkExt) { - return nil - } - sID = strings.TrimSuffix(filepath.Base(path), UncompressedChunkExt) - } else { - if !strings.HasSuffix(path, CompressedChunkExt) { - return nil - } - sID = strings.TrimSuffix(filepath.Base(path), CompressedChunkExt) + if !strings.HasSuffix(filepath.Base(path), extension) { + return nil } + sID := strings.TrimSuffix(filepath.Base(path), extension) // Convert the name into a checksum, if that fails we're probably not looking // at a chunk file and should skip it. 
id, err := ChunkIDFromString(sID) @@ -250,11 +235,6 @@ func (s LocalStore) Close() error { return nil } func (s LocalStore) nameFromID(id ChunkID) (dir, name string) { sID := id.String() dir = filepath.Join(s.Base, sID[0:4]) - name = filepath.Join(dir, sID) - if s.opt.Uncompressed { - name += UncompressedChunkExt - } else { - name += CompressedChunkExt - } + name = filepath.Join(dir, sID) + s.converters.storageExtension() return } diff --git a/local_test.go b/local_test.go index 9b79302..0105d4a 100644 --- a/local_test.go +++ b/local_test.go @@ -282,11 +282,12 @@ func TestLocalStorePasswordMismatch(t *testing.T) { require.NoError(t, err) require.Equal(t, dataIn, dataOut) - // Try to get the chunk with a bad password, expect a signature mismatch + // Try to get the chunk with a bad password, expect a not-found + // since the chunk extensions are different for diff keys. _, err = s2.GetChunk(id) require.Error(t, err) - if _, ok := err.(ChunkInvalid); !ok { - t.Fatalf("expected ChunkInvalid error, but got %T", err) + if _, ok := err.(ChunkMissing); !ok { + t.Fatalf("expected ChunkMissing error, but got %T", err) } } diff --git a/remotehttp.go b/remotehttp.go index 6789c47..b29aee9 100644 --- a/remotehttp.go +++ b/remotehttp.go @@ -256,11 +256,6 @@ func (r *RemoteHTTP) StoreChunk(chunk *Chunk) error { func (r *RemoteHTTP) nameFromID(id ChunkID) string { sID := id.String() - name := path.Join(sID[0:4], sID) - if r.opt.Uncompressed { - name += UncompressedChunkExt - } else { - name += CompressedChunkExt - } + name := path.Join(sID[0:4], sID) + r.converters.storageExtension() return name } diff --git a/s3.go b/s3.go index 6dbd429..45c48f7 100644 --- a/s3.go +++ b/s3.go @@ -192,28 +192,16 @@ func (s S3Store) Prune(ctx context.Context, ids map[ChunkID]struct{}) error { func (s S3Store) nameFromID(id ChunkID) string { sID := id.String() - name := s.prefix + sID[0:4] + "/" + sID - if s.opt.Uncompressed { - name += UncompressedChunkExt - } else { - name += CompressedChunkExt - 
} + name := s.prefix + sID[0:4] + "/" + sID + s.converters.storageExtension() return name } func (s S3Store) idFromName(name string) (ChunkID, error) { - var n string - if s.opt.Uncompressed { - if !strings.HasSuffix(name, UncompressedChunkExt) { - return ChunkID{}, fmt.Errorf("object %s is not a chunk", name) - } - n = strings.TrimSuffix(strings.TrimPrefix(name, s.prefix), UncompressedChunkExt) - } else { - if !strings.HasSuffix(name, CompressedChunkExt) { - return ChunkID{}, fmt.Errorf("object %s is not a chunk", name) - } - n = strings.TrimSuffix(strings.TrimPrefix(name, s.prefix), CompressedChunkExt) + extension := s.converters.storageExtension() + if !strings.HasSuffix(name, extension) { + return ChunkID{}, fmt.Errorf("object %s is not a chunk", name) } + n := strings.TrimSuffix(strings.TrimPrefix(name, s.prefix), extension) fragments := strings.Split(n, "/") if len(fragments) != 2 { return ChunkID{}, fmt.Errorf("incorrect chunk name for object %s", name) diff --git a/sftp.go b/sftp.go index d869a0a..98ff429 100644 --- a/sftp.go +++ b/sftp.go @@ -23,11 +23,12 @@ var _ WriteStore = &SFTPStore{} // SFTPStoreBase is the base object for SFTP chunk and index stores. type SFTPStoreBase struct { - location *url.URL - path string - client *sftp.Client - cancel context.CancelFunc - opt StoreOptions + location *url.URL + path string + client *sftp.Client + cancel context.CancelFunc + opt StoreOptions + extension string } // SFTPStore is a chunk store that uses SFTP over SSH. 
@@ -39,7 +40,7 @@ type SFTPStore struct { } // Creates a base sftp client -func newSFTPStoreBase(location *url.URL, opt StoreOptions) (*SFTPStoreBase, error) { +func newSFTPStoreBase(location *url.URL, opt StoreOptions, extension string) (*SFTPStoreBase, error) { sshCmd := os.Getenv("CASYNC_SSH_PATH") if sshCmd == "" { sshCmd = "ssh" @@ -82,7 +83,7 @@ func newSFTPStoreBase(location *url.URL, opt StoreOptions) (*SFTPStoreBase, erro cancel() return nil, errors.Wrapf(err, "failed to stat '%s'", path) } - return &SFTPStoreBase{location, path, client, cancel, opt}, nil + return &SFTPStoreBase{location, path, client, cancel, opt, extension}, nil } // StoreObject adds a new object to a writable index or chunk store. @@ -132,12 +133,7 @@ func (s *SFTPStoreBase) String() string { // Returns the path for a chunk func (s *SFTPStoreBase) nameFromID(id ChunkID) string { sID := id.String() - name := s.path + sID[0:4] + "/" + sID - if s.opt.Uncompressed { - name += UncompressedChunkExt - } else { - name += CompressedChunkExt - } + name := s.path + sID[0:4] + "/" + sID + s.extension return name } @@ -147,9 +143,10 @@ func NewSFTPStore(location *url.URL, opt StoreOptions) (*SFTPStore, error) { if err != nil { return nil, err } + extension := Converters(converters).storageExtension() s := &SFTPStore{make(chan *SFTPStoreBase, opt.N), location, opt.N, converters} for i := 0; i < opt.N; i++ { - c, err := newSFTPStoreBase(location, opt) + c, err := newSFTPStoreBase(location, opt, extension) if err != nil { return nil, err } @@ -219,6 +216,7 @@ func (s *SFTPStore) HasChunk(id ChunkID) (bool, error) { // Prune removes any chunks from the store that are not contained in a list // of chunks func (s *SFTPStore) Prune(ctx context.Context, ids map[ChunkID]struct{}) error { + extension := s.converters.storageExtension() c := <-s.pool defer func() { s.pool <- c }() walker := c.client.Walk(c.path) @@ -238,23 +236,11 @@ func (s *SFTPStore) Prune(ctx context.Context, ids map[ChunkID]struct{}) error 
{ continue } path := walker.Path() - if !strings.HasSuffix(path, CompressedChunkExt) { // Skip files without chunk extension + if !strings.HasSuffix(path, extension) { // Skip files without expected chunk extension continue } - // Skip compressed chunks if this is running in uncompressed mode and vice-versa - var sID string - if c.opt.Uncompressed { - if !strings.HasSuffix(path, UncompressedChunkExt) { - return nil - } - sID = strings.TrimSuffix(filepath.Base(path), UncompressedChunkExt) - } else { - if !strings.HasSuffix(path, CompressedChunkExt) { - return nil - } - sID = strings.TrimSuffix(filepath.Base(path), CompressedChunkExt) - } - // Convert the name into a checksum, if that fails we're probably not looking + sID := strings.TrimSuffix(filepath.Base(path), extension) + // Convert the name into a hash, if that fails we're probably not looking // at a chunk file and should skip it. id, err := ChunkIDFromString(sID) if err != nil { diff --git a/sftpindex.go b/sftpindex.go index f637d04..de2549f 100644 --- a/sftpindex.go +++ b/sftpindex.go @@ -17,7 +17,7 @@ type SFTPIndexStore struct { // NewSFTPIndexStore initializes and index store backed by SFTP over SSH. 
func NewSFTPIndexStore(location *url.URL, opt StoreOptions) (*SFTPIndexStore, error) { - b, err := newSFTPStoreBase(location, opt) + b, err := newSFTPStoreBase(location, opt, "") if err != nil { return nil, err } From 05dccb71b8a9a83115855e761c600086f9c7fef9 Mon Sep 17 00:00:00 2001 From: folbrich Date: Wed, 30 Dec 2020 12:41:14 -0700 Subject: [PATCH 5/9] shorten the example to better fit in the readme --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 762ed8a..26c39ef 100644 --- a/README.md +++ b/README.md @@ -242,10 +242,10 @@ Chunk extensions are chosen based on compression or encryption settings as follo | Compressed | Encrypted | Extension | Example | |:---:|:---:|:---:|:---:| -| no | no | n/a | `fbef/fbef1a00ceda67e2abc49f707fd70e315fab60eacd19c257e23897339280ce78` | -| yes | no | `.cacnk` | `ffbef/fbef1a00ceda67e2abc49f707fd70e315fab60eacd19c257e23897339280ce78.cacnk` | -| no | yes | `.-` | `fbef/fbef1a00ceda67e2abc49f707fd70e315fab60eacd19c257e23897339280ce78.aes-256-ctr-635af003` | -| yes | yes | `.cacnk.-` | `fbef/fbef1a00ceda67e2abc49f707fd70e315fab60eacd19c257e23897339280ce78.cacnk.aes-256-ctr-635af003` | +| no | no | n/a | `fbef/fbef1a00ced..9280ce78` | +| yes | no | `.cacnk` | `ffbef/fbef1a00ced..9280ce78.cacnk` | +| no | yes | `.-` | `fbef/fbef1a00ced..9280ce78.aes-256-ctr-635af003` | +| yes | yes | `.cacnk.-` | `fbef/fbef1a00ced..9280ce78.cacnk.aes-256-ctr-635af003` | ### Configuration From 2837ee1f426eaf0594454a12f717e224b68c8adb Mon Sep 17 00:00:00 2001 From: folbrich Date: Thu, 31 Dec 2020 16:31:04 -0700 Subject: [PATCH 6/9] Add XChaCha20-Poly1305 (default) and AES-256-GCM --- README.md | 17 ++++-- cmd/desync/chunkserver.go | 29 ++++------ encrypt.go | 114 ++++++++++++++++++++++++++++++++++++++ encrypt_test.go | 64 ++++++++++++--------- gcs.go | 2 +- httphandler_test.go | 2 +- local.go | 2 +- local_test.go | 2 +- remotehttp.go | 2 +- remotehttp_test.go | 2 +- s3.go | 2 +- sftp.go | 
2 +- store.go | 22 ++++++-- 13 files changed, 202 insertions(+), 60 deletions(-) diff --git a/README.md b/README.md index 26c39ef..c07e80e 100644 --- a/README.md +++ b/README.md @@ -237,15 +237,22 @@ Compressed and uncompressed chunks can live in the same store and don't interfer ### Chunk Encryption Chunks can be encrypted with a symmetric algorithm on a per-store basis. To use encryption, it has to be enabled in the [configuration](Configuration) file, and an algorithm needs to be specified. A single instance of desync can use multiple stores at the same time, each with a different (or the same) encryption mode and key. Encrypted chunks are stores with file extensions containing the algorithm and a key identifier. If the password for a store is changed, all existing chunks in it will become "invisible" since the extension would no longer match. To change the key, chunks have to be re-encrypted with the new key. That could happen into same, or better, a new store. Create a new store, then either re-chunk the data, or use `desync cache -c -s ` to decrypt the chunks from the old store and re-encrypt with the new key in the new store. +For all available algorithms, the 256bit encryption key is derived from the configured password by hashing it with SHA256. Encryption nonces or IVs are generated randomly per chunk which can weaken encryption in some modes when used on very large chunk stores, see notes below. 
-Chunk extensions are chosen based on compression or encryption settings as follows: +| ID | Algorithm | Notes | +|:---:|:---:|:---:| +| `xchacha20-poly1305` | XChaCha20-Poly1305 (AEAD) | Default | +| `aes-256-gcm` | AES 256bit Galois Counter Mode (AEAD) | Don't use for large chunk stores (>232 chunks) | +| `aes-256-ctr` | AES 256bit Counter Mode | Don't use for large chunk stores (>232 chunks) | + +Chunk extensions in stores are chosen based on compression or encryption settings as follows: | Compressed | Encrypted | Extension | Example | |:---:|:---:|:---:|:---:| | no | no | n/a | `fbef/fbef1a00ced..9280ce78` | | yes | no | `.cacnk` | `ffbef/fbef1a00ced..9280ce78.cacnk` | -| no | yes | `.-` | `fbef/fbef1a00ced..9280ce78.aes-256-ctr-635af003` | -| yes | yes | `.cacnk.-` | `fbef/fbef1a00ced..9280ce78.cacnk.aes-256-ctr-635af003` | +| no | yes | `.-` | `fbef/fbef1a00ced..9280ce78.aes-256-gcm-635af003` | +| yes | yes | `.cacnk.-` | `fbef/fbef1a00ced..9280ce78.cacnk.aes-256-gcm-635af003` | ### Configuration @@ -269,7 +276,7 @@ Available configuration values: - `http-auth` - Value of the Authorization header in HTTP requests. This could be a bearer token with `"Bearer "` or a Base64-encoded username and password pair for basic authentication like `"Basic dXNlcjpwYXNzd29yZAo="`. - `encryption` - Must be set to `true` to encrypt chunks in the store. - `encryption-password` - Encryption password to use for all chunks in the store. - - `encryption-algorithm` - Optional, symmetric encryption algorithm. Default `aes-256-ctr`. + - `encryption-algorithm` - Optional, symmetric encryption algorithm. Default `xchacha20-poly1305`. 
#### Example config @@ -307,7 +314,7 @@ Available configuration values: }, "/path/to/encrypted/store": { "encryption": true, - "encryption-algorithm": "aes-256-ctr", + "encryption-algorithm": "xchacha20-poly1305", "encryption-password": "mystorepassword" } } diff --git a/cmd/desync/chunkserver.go b/cmd/desync/chunkserver.go index 57a1651..8f6423b 100644 --- a/cmd/desync/chunkserver.go +++ b/cmd/desync/chunkserver.go @@ -71,7 +71,7 @@ needing to restart the server. This can be done under load as well. flags.BoolVarP(&opt.uncompressed, "uncompressed", "u", false, "serve uncompressed chunks") flags.StringVar(&opt.logFile, "log", "", "request log file or - for STDOUT") flags.StringVar(&opt.encryptionPw, "encryption-password", "", "serve chunks encrypted with this password") - flags.StringVar(&opt.encryptionAlg, "encryption-algorithm", "aes-256-ctr", "encryption algorithm") + flags.StringVar(&opt.encryptionAlg, "encryption-algorithm", "xchacha20-poly1305", "encryption algorithm") addStoreOptions(&opt.cmdStoreOptions, flags) addServerOptions(&opt.cmdServerOptions, flags) return cmd @@ -132,22 +132,17 @@ func runChunkServer(ctx context.Context, opt chunkServerOptions, args []string) defer s.Close() // Build the converters. In this case, the "storage" side is what is served - // up by the server towards the client. - var converters desync.Converters - if !opt.uncompressed { - converters = append(converters, desync.Compressor{}) - } - if opt.encryptionPw != "" { - switch opt.encryptionAlg { - case "", "aes-256-ctr": - enc, err := desync.NewAES256CTR(opt.encryptionPw) - if err != nil { - return err - } - converters = append(converters, enc) - default: - return fmt.Errorf("unsupported encryption algorithm %q", opt.encryptionAlg) - } + // up by the server towards the client. The StoreOptions struct already has + // logic to build the converters from options so use that instead of repeating + // it here. 
+ converters, err := desync.StoreOptions{ + Uncompressed: opt.uncompressed, + Encryption: opt.encryptionPw != "", + EncryptionAlgorithm: opt.encryptionAlg, + EncryptionPassword: opt.encryptionPw, + }.StorageConverters() + if err != nil { + return err } handler := desync.NewHTTPHandler(s, opt.writable, opt.skipVerifyWrite, converters, opt.auth) diff --git a/encrypt.go b/encrypt.go index 506cad6..bfc7b9a 100644 --- a/encrypt.go +++ b/encrypt.go @@ -9,6 +9,8 @@ import ( "errors" "fmt" "io" + + "golang.org/x/crypto/chacha20poly1305" ) // aes256ctr is an encryption layer for chunk storage. It @@ -69,3 +71,115 @@ func (d aes256ctr) equal(c converter) bool { func (d aes256ctr) storageExtension() string { return d.extension } + +// xchacha20poly1305 is an encryption layer for chunk storage. It +// encrypts/decrypts to/from storage using ChaCha20-Poly1305 AEAD. +// The key is generated from a passphrase with SHA256. +type xchacha20poly1305 struct { + key []byte + aead cipher.AEAD + + // Chunk extension with identifier derived from the key. + extension string +} + +var _ converter = aes256ctr{} + +func NewXChaCha20Poly1305(passphrase string) (xchacha20poly1305, error) { + key := sha256.Sum256([]byte(passphrase)) + keyHash := sha256.Sum256(key[:]) + extension := fmt.Sprintf(".xchacha20-poly1305-%x", keyHash[:4]) + aead, err := chacha20poly1305.NewX(key[:]) + return xchacha20poly1305{key: key[:], aead: aead, extension: extension}, err +} + +// encrypt for storage. The nonce is prepended to the data. +func (d xchacha20poly1305) toStorage(in []byte) ([]byte, error) { + out := make([]byte, d.aead.NonceSize(), d.aead.NonceSize()+len(in)+d.aead.Overhead()) + nonce := out[:d.aead.NonceSize()] + if _, err := rand.Read(nonce); err != nil { + return nil, err + } + return d.aead.Seal(out, nonce, in, nil), nil +} + +// decrypt from storage. The nonce is taken from the start of the +// chunk data. This by itself does not verify integrity. 
That +// is achieved by the existing chunk validation. +func (d xchacha20poly1305) fromStorage(in []byte) ([]byte, error) { + if len(in) < d.aead.NonceSize() { + return nil, errors.New("no nonce prefix found in chunk, not encrypted or wrong algorithm") + } + nonce := in[:d.aead.NonceSize()] + return d.aead.Open(nil, nonce, in[d.aead.NonceSize():], nil) +} + +func (d xchacha20poly1305) equal(c converter) bool { + other, ok := c.(xchacha20poly1305) + if !ok { + return false + } + return bytes.Equal(d.key, other.key) +} + +func (d xchacha20poly1305) storageExtension() string { + return d.extension +} + +// aes256gcm is an encryption layer for chunk storage. It +// encrypts/decrypts to/from storage using AES 256 GCM. +// The key is generated from a passphrase with SHA256. +type aes256gcm struct { + key []byte + aead cipher.AEAD + + // Chunk extension with identifier derived from the key. + extension string +} + +var _ converter = aes256ctr{} + +func NewAES256GCM(passphrase string) (aes256gcm, error) { + key := sha256.Sum256([]byte(passphrase)) + keyHash := sha256.Sum256(key[:]) + extension := fmt.Sprintf(".aes-256-gcm-%x", keyHash[:4]) + block, err := aes.NewCipher(key[:]) + if err != nil { + return aes256gcm{}, err + } + aead, err := cipher.NewGCM(block) + return aes256gcm{key: key[:], aead: aead, extension: extension}, err +} + +// encrypt for storage. The nonce is prepended to the data. +func (d aes256gcm) toStorage(in []byte) ([]byte, error) { + out := make([]byte, d.aead.NonceSize(), d.aead.NonceSize()+len(in)+d.aead.Overhead()) + nonce := out[:d.aead.NonceSize()] + if _, err := rand.Read(nonce); err != nil { + return nil, err + } + return d.aead.Seal(out, nonce, in, nil), nil +} + +// decrypt from storage. The nonce is taken from the start of the +// chunk data. This by itself does not verify integrity. That +// is achieved by the existing chunk validation. 
+func (d aes256gcm) fromStorage(in []byte) ([]byte, error) { + if len(in) < d.aead.NonceSize() { + return nil, errors.New("no nonce prefix found in chunk, not encrypted or wrong algorithm") + } + nonce := in[:d.aead.NonceSize()] + return d.aead.Open(nil, nonce, in[d.aead.NonceSize():], nil) +} + +func (d aes256gcm) equal(c converter) bool { + other, ok := c.(aes256gcm) + if !ok { + return false + } + return bytes.Equal(d.key, other.key) +} + +func (d aes256gcm) storageExtension() string { + return d.extension +} diff --git a/encrypt_test.go b/encrypt_test.go index 3466788..9d39e4b 100644 --- a/encrypt_test.go +++ b/encrypt_test.go @@ -6,38 +6,52 @@ import ( "github.com/stretchr/testify/require" ) -func TestAES256CTREncryptDecrypt(t *testing.T) { - plainIn := []byte{1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20} +func TestEncryptDecrypt(t *testing.T) { + tests := map[string]struct { + alg func(string) (converter, error) + }{ + "xchacha20-poly1305": {func(pw string) (converter, error) { return NewXChaCha20Poly1305(pw) }}, + "aes-256-gcm": {func(pw string) (converter, error) { return NewAES256GCM(pw) }}, + "aes-256-ctr": {func(pw string) (converter, error) { return NewAES256CTR(pw) }}, + } - // Make two converters. One for encryption and one for decryption. Could use - // just one but this way we confirm the key generation is consistent - enc, err := NewAES256CTR("secret-password") - require.NoError(t, err) - dec, err := NewAES256CTR("secret-password") - require.NoError(t, err) + for name, test := range tests { + t.Run(name, func(t *testing.T) { + plainIn := []byte{1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20} - // Encrypt the data - ciphertext, err := enc.toStorage(plainIn) - require.NoError(t, err) + // Make two converters. One for encryption and one for decryption. 
Could use + // just one but this way we confirm the key generation is consistent + enc, err := test.alg("secret-password") + require.NoError(t, err) + dec, err := test.alg("secret-password") + require.NoError(t, err) - // Confirm the ciphertext is actually different than what went in - require.NotEqual(t, plainIn, ciphertext) + // Encrypt the data + ciphertext, err := enc.toStorage(plainIn) + require.NoError(t, err) - // Decrypt it - plainOut, err := dec.fromStorage(ciphertext) - require.NoError(t, err) + // Confirm the ciphertext is actually different than what went in + require.NotEqual(t, plainIn, ciphertext) - // This should match the original data of course - require.Equal(t, plainIn, plainOut) + // Decrypt it + plainOut, err := dec.fromStorage(ciphertext) + require.NoError(t, err) - // Make another instance with a different password - diffPw, err := NewAES256CTR("something-else") - require.NoError(t, err) + // This should match the original data of course + require.Equal(t, plainIn, plainOut) - // Try to decrypt the data, should end up with garbage - diffOut, err := diffPw.fromStorage(ciphertext) - require.NoError(t, err) - require.NotEqual(t, plainIn, diffOut) + // Make another instance with a different password + diffPw, err := test.alg("something-else") + require.NoError(t, err) + + // Try to decrypt the data, should end up with garbage or an + // error from AEAD algorithms + diffOut, err := diffPw.fromStorage(ciphertext) + if err == nil { + require.NotEqual(t, plainIn, diffOut) + } + }) + } } func TestAES256CTRCompare(t *testing.T) { diff --git a/gcs.go b/gcs.go index ab6275c..2a7e13c 100644 --- a/gcs.go +++ b/gcs.go @@ -54,7 +54,7 @@ func normalizeGCPrefix(path string) string { // backed by Google Storage. 
func NewGCStoreBase(u *url.URL, opt StoreOptions) (GCStoreBase, error) { ctx := context.TODO() - converters, err := opt.converters() + converters, err := opt.StorageConverters() if err != nil { return GCStoreBase{}, err } diff --git a/httphandler_test.go b/httphandler_test.go index 93e368c..6a6f931 100644 --- a/httphandler_test.go +++ b/httphandler_test.go @@ -111,7 +111,7 @@ func TestHTTPHandlerEncryption(t *testing.T) { require.NoError(t, err) // Start a read-write capable server with Encryption, no Compression - enc, err := NewAES256CTR("testpassword") + enc, err := NewXChaCha20Poly1305("testpassword") require.NoError(t, err) server := httptest.NewServer(NewHTTPHandler(upstream, true, false, []converter{enc}, "")) defer server.Close() diff --git a/local.go b/local.go index 3e71165..4bc6b23 100644 --- a/local.go +++ b/local.go @@ -42,7 +42,7 @@ func NewLocalStore(dir string, opt StoreOptions) (LocalStore, error) { if !info.IsDir() { return LocalStore{}, fmt.Errorf("%s is not a directory", dir) } - converters, err := opt.converters() + converters, err := opt.StorageConverters() if err != nil { return LocalStore{}, err } diff --git a/local_test.go b/local_test.go index 0105d4a..b0d9211 100644 --- a/local_test.go +++ b/local_test.go @@ -235,7 +235,7 @@ func TestLocalStoreCompressedEncrypted(t *testing.T) { require.NoError(t, err) // First decrypt it, using the correct password - dec, _ := NewAES256CTR("test-password") + dec, _ := NewXChaCha20Poly1305("test-password") decrypted, err := dec.fromStorage(b) require.NoError(t, err) diff --git a/remotehttp.go b/remotehttp.go index b29aee9..8142359 100644 --- a/remotehttp.go +++ b/remotehttp.go @@ -89,7 +89,7 @@ func NewRemoteHTTPStoreBase(location *url.URL, opt StoreOptions) (*RemoteHTTPBas } client := &http.Client{Transport: tr, Timeout: timeout} - converters, err := opt.converters() + converters, err := opt.StorageConverters() if err != nil { return nil, err } diff --git a/remotehttp_test.go b/remotehttp_test.go index 
bdc9c6a..5e5cf14 100644 --- a/remotehttp_test.go +++ b/remotehttp_test.go @@ -331,7 +331,7 @@ func TestRemoteHTTPPutEncrypted(t *testing.T) { // If everything worked, the request body should be the chunk data, first // compressed, then encrypted. Unwind it manually to check the layers are in order. - dec, err := NewAES256CTR("testpassword") + dec, err := NewXChaCha20Poly1305("testpassword") require.NoError(t, err) decrypted, err := dec.fromStorage(body.Bytes()) require.NoError(t, err) diff --git a/s3.go b/s3.go index 45c48f7..cf75520 100644 --- a/s3.go +++ b/s3.go @@ -32,7 +32,7 @@ type S3Store struct { // NewS3StoreBase initializes a base object used for chunk or index stores backed by S3. func NewS3StoreBase(u *url.URL, s3Creds *credentials.Credentials, region string, opt StoreOptions, lookupType minio.BucketLookupType) (S3StoreBase, error) { - converters, err := opt.converters() + converters, err := opt.StorageConverters() if err != nil { return S3StoreBase{}, err } diff --git a/sftp.go b/sftp.go index 98ff429..cd21058 100644 --- a/sftp.go +++ b/sftp.go @@ -139,7 +139,7 @@ func (s *SFTPStoreBase) nameFromID(id ChunkID) string { // NewSFTPStore initializes a chunk store using SFTP over SSH. func NewSFTPStore(location *url.URL, opt StoreOptions) (*SFTPStore, error) { - converters, err := opt.converters() + converters, err := opt.StorageConverters() if err != nil { return nil, err } diff --git a/store.go b/store.go index 366892a..c63142e 100644 --- a/store.go +++ b/store.go @@ -88,19 +88,19 @@ type StoreOptions struct { // Store and read chunks uncompressed, without chunk file extension Uncompressed bool `json:"uncompressed"` - // Store encryption settings. The only algorithm currently supported is aes-256-ctr which - // is also the default. + // Store encryption settings. Currently supported algorithms are xchacha20-poly1305 (default) + // aes-256-gcm, and aes-256-ctr. 
Encryption bool `json:"encryption,omitempty"` EncryptionAlgorithm string `json:"encryption-algorithm,omitempty"` EncryptionPassword string `json:"encryption-password,omitempty"` } -// Returns data converters that convert between plain and storage-format. Each layer +// Returns data StorageConverters that convert between plain and storage-format. Each layer // represents a modification such as compression or encryption and is applied in order // depending the direction of data. If data is written to storage, the layer's toStorage // method is called in the order they are defined. If data is read, the fromStorage // method is called in reverse order. -func (o StoreOptions) converters() ([]converter, error) { +func (o StoreOptions) StorageConverters() ([]converter, error) { var c []converter if !o.Uncompressed { c = append(c, Compressor{}) @@ -110,7 +110,19 @@ func (o StoreOptions) converters() ([]converter, error) { return nil, errors.New("no encryption password configured") } switch o.EncryptionAlgorithm { - case "", "aes-256-ctr": + case "", "xchacha20-poly1305": + enc, err := NewXChaCha20Poly1305(o.EncryptionPassword) + if err != nil { + return nil, err + } + c = append(c, enc) + case "aes-256-gcm": + enc, err := NewAES256GCM(o.EncryptionPassword) + if err != nil { + return nil, err + } + c = append(c, enc) + case "aes-256-ctr": enc, err := NewAES256CTR(o.EncryptionPassword) if err != nil { return nil, err From 1c82e632c23e2041a6f1a37e9d64fa2889da3b53 Mon Sep 17 00:00:00 2001 From: folbrich Date: Thu, 31 Dec 2020 16:40:22 -0700 Subject: [PATCH 7/9] Update readme --- README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index c07e80e..5bb557a 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ Among the distinguishing factors: - Built-in HTTP(S) index server to read/write indexes - Reflinking matching blocks (rather than copying) from seed files if supported by the filesystem (currently only Btrfs and 
XFS) - catar archives can be created from standard tar archives, and they can also be extracted to GNU tar format. -- Optional chunk store encryption with AES-265-CTR. +- Optional chunk store encryption with XChaCha20-Poly1305, AES-265-GCM and AES-265-CTR. ## Terminology @@ -239,11 +239,11 @@ Compressed and uncompressed chunks can live in the same store and don't interfer Chunks can be encrypted with a symmetric algorithm on a per-store basis. To use encryption, it has to be enabled in the [configuration](Configuration) file, and an algorithm needs to be specified. A single instance of desync can use multiple stores at the same time, each with a different (or the same) encryption mode and key. Encrypted chunks are stores with file extensions containing the algorithm and a key identifier. If the password for a store is changed, all existing chunks in it will become "invisible" since the extension would no longer match. To change the key, chunks have to be re-encrypted with the new key. That could happen into same, or better, a new store. Create a new store, then either re-chunk the data, or use `desync cache -c -s ` to decrypt the chunks from the old store and re-encrypt with the new key in the new store. For all available algorithms, the 256bit encryption key is derived from the configured password by hashing it with SHA256. Encryption nonces or IVs are generated randomly per chunk which can weaken encryption in some modes when used on very large chunk stores, see notes below. 
-| ID | Algorithm | Notes | -|:---:|:---:|:---:| -| `xchacha20-poly1305` | XChaCha20-Poly1305 (AEAD) | Default | -| `aes-256-gcm` | AES 256bit Galois Counter Mode (AEAD) | Don't use for large chunk stores (>232 chunks) | -| `aes-256-ctr` | AES 256bit Counter Mode | Don't use for large chunk stores (>232 chunks) | +| ID | Algorithm | Key | Nonce/IV | Notes | +|:---:|:---:|:---:|:---:|:---:| +| `xchacha20-poly1305` | XChaCha20-Poly1305 (AEAD) | 256bit | 192bit | Default | +| `aes-256-gcm` | AES 256bit Galois Counter Mode (AEAD) | 256bit | 128bit | Don't use for large chunk stores (>232 chunks) | +| `aes-256-ctr` | AES 256bit Counter Mode | 256bit | 128bit | Don't use for large chunk stores (>232 chunks) | Chunk extensions in stores are chosen based on compression or encryption settings as follows: From 5961612e98281e4f3fe1702a61d1d2f76acb3124 Mon Sep 17 00:00:00 2001 From: folbrich Date: Thu, 31 Dec 2020 16:46:33 -0700 Subject: [PATCH 8/9] Typo --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 5bb557a..8069031 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ Among the distinguishing factors: - Built-in HTTP(S) index server to read/write indexes - Reflinking matching blocks (rather than copying) from seed files if supported by the filesystem (currently only Btrfs and XFS) - catar archives can be created from standard tar archives, and they can also be extracted to GNU tar format. -- Optional chunk store encryption with XChaCha20-Poly1305, AES-265-GCM and AES-265-CTR. +- Optional chunk store encryption with XChaCha20-Poly1305, AES-265-GCM or AES-265-CTR. ## Terminology @@ -70,7 +70,7 @@ catar archives can also be extracted to GNU tar archive streams. All files in th ## Tool -The tool is provided for convenience. It uses the desync library and makes most features of it available in a consistent fashion. It does not match upsteam casync's syntax exactly, but tries to be similar at least. 
+The tool is provided for convenience. It uses the desync library and makes most features of it available in a consistent fashion. It does not match upstream casync's syntax exactly, but tries to be similar at least. ### Installation From fa43d2e12b0e97f49288e67dbfd4bcf8350dcbd8 Mon Sep 17 00:00:00 2001 From: folbrich Date: Sat, 9 Jan 2021 09:21:17 -0700 Subject: [PATCH 9/9] Remove AES-256-CTR --- README.md | 3 +-- encrypt.go | 64 ++----------------------------------------------- encrypt_test.go | 26 +++++++++----------- store.go | 8 +------ 4 files changed, 15 insertions(+), 86 deletions(-) diff --git a/README.md b/README.md index 8069031..483458b 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ Among the distinguishing factors: - Built-in HTTP(S) index server to read/write indexes - Reflinking matching blocks (rather than copying) from seed files if supported by the filesystem (currently only Btrfs and XFS) - catar archives can be created from standard tar archives, and they can also be extracted to GNU tar format. -- Optional chunk store encryption with XChaCha20-Poly1305, AES-265-GCM or AES-265-CTR. +- Optional chunk store encryption with XChaCha20-Poly1305 or AES-265-GCM. 
## Terminology @@ -243,7 +243,6 @@ For all available algorithms, the 256bit encryption key is derived from the conf |:---:|:---:|:---:|:---:|:---:| | `xchacha20-poly1305` | XChaCha20-Poly1305 (AEAD) | 256bit | 192bit | Default | | `aes-256-gcm` | AES 256bit Galois Counter Mode (AEAD) | 256bit | 128bit | Don't use for large chunk stores (>232 chunks) | -| `aes-256-ctr` | AES 256bit Counter Mode | 256bit | 128bit | Don't use for large chunk stores (>232 chunks) | Chunk extensions in stores are chosen based on compression or encryption settings as follows: diff --git a/encrypt.go b/encrypt.go index bfc7b9a..e4b1a92 100644 --- a/encrypt.go +++ b/encrypt.go @@ -8,70 +8,10 @@ import ( "crypto/sha256" "errors" "fmt" - "io" "golang.org/x/crypto/chacha20poly1305" ) -// aes256ctr is an encryption layer for chunk storage. It -// encrypts/decrypts to/from storage using aes-256-ctr. -// The key is generated from a passphrase with SHA256. -type aes256ctr struct { - key []byte - block cipher.Block - - // Chunk extension with identifier derived from the key. - extension string -} - -var _ converter = aes256ctr{} - -func NewAES256CTR(passphrase string) (aes256ctr, error) { - key := sha256.Sum256([]byte(passphrase)) - keyHash := sha256.Sum256(key[:]) - extension := fmt.Sprintf(".aes-256-ctr-%x", keyHash[:4]) - block, err := aes.NewCipher(key[:]) - return aes256ctr{key: key[:], block: block, extension: extension}, err -} - -// encrypt for storage. The IV is prepended to the data. -func (d aes256ctr) toStorage(in []byte) ([]byte, error) { - out := make([]byte, aes.BlockSize+len(in)) - iv := out[:aes.BlockSize] - if _, err := io.ReadFull(rand.Reader, iv); err != nil { - return nil, err - } - stream := cipher.NewCTR(d.block, iv) - stream.XORKeyStream(out[aes.BlockSize:], in) - return out, nil -} - -// decrypt from storage. The IV is taken from the start of the -// chunk data. This by itself does not verify integrity. That -// is achieved by the existing chunk validation. 
-func (d aes256ctr) fromStorage(in []byte) ([]byte, error) { - if len(in) < aes.BlockSize { - return nil, errors.New("no iv prefix found in chunk, not encrypted or wrong algorithm") - } - out := make([]byte, len(in)-aes.BlockSize) - iv := in[:aes.BlockSize] - stream := cipher.NewCTR(d.block, iv) - stream.XORKeyStream(out, in[aes.BlockSize:]) - return out, nil -} - -func (d aes256ctr) equal(c converter) bool { - other, ok := c.(aes256ctr) - if !ok { - return false - } - return bytes.Equal(d.key, other.key) -} - -func (d aes256ctr) storageExtension() string { - return d.extension -} - // xchacha20poly1305 is an encryption layer for chunk storage. It // encrypts/decrypts to/from storage using ChaCha20-Poly1305 AEAD. // The key is generated from a passphrase with SHA256. @@ -83,7 +23,7 @@ type xchacha20poly1305 struct { extension string } -var _ converter = aes256ctr{} +var _ converter = xchacha20poly1305{} func NewXChaCha20Poly1305(passphrase string) (xchacha20poly1305, error) { key := sha256.Sum256([]byte(passphrase)) @@ -137,7 +77,7 @@ type aes256gcm struct { extension string } -var _ converter = aes256ctr{} +var _ converter = aes256gcm{} func NewAES256GCM(passphrase string) (aes256gcm, error) { key := sha256.Sum256([]byte(passphrase)) diff --git a/encrypt_test.go b/encrypt_test.go index 9d39e4b..88dd6df 100644 --- a/encrypt_test.go +++ b/encrypt_test.go @@ -12,7 +12,6 @@ func TestEncryptDecrypt(t *testing.T) { }{ "xchacha20-poly1305": {func(pw string) (converter, error) { return NewXChaCha20Poly1305(pw) }}, "aes-256-gcm": {func(pw string) (converter, error) { return NewAES256GCM(pw) }}, - "aes-256-ctr": {func(pw string) (converter, error) { return NewAES256CTR(pw) }}, } for name, test := range tests { @@ -44,23 +43,20 @@ func TestEncryptDecrypt(t *testing.T) { diffPw, err := test.alg("something-else") require.NoError(t, err) - // Try to decrypt the data, should end up with garbage or an - // error from AEAD algorithms - diffOut, err := 
diffPw.fromStorage(ciphertext) - if err == nil { - require.NotEqual(t, plainIn, diffOut) - } + // Try to decrypt the data, should get an error from AEAD algorithms + _, err = diffPw.fromStorage(ciphertext) + require.Error(t, err) }) } } -func TestAES256CTRCompare(t *testing.T) { +func TestAES256GCMCompare(t *testing.T) { // Make three converters. Two with the same, one with a diff password - enc1, err := NewAES256CTR("secret-password") + enc1, err := NewAES256GCM("secret-password") require.NoError(t, err) - enc2, err := NewAES256CTR("secret-password") + enc2, err := NewAES256GCM("secret-password") require.NoError(t, err) - diffPw, err := NewAES256CTR("something-else") + diffPw, err := NewAES256GCM("something-else") require.NoError(t, err) // Check equality method @@ -70,16 +66,16 @@ func TestAES256CTRCompare(t *testing.T) { require.False(t, enc1.equal(diffPw)) } -func TestAES256CTRExtension(t *testing.T) { - enc1, err := NewAES256CTR("secret-password") +func TestAES256GCMExtension(t *testing.T) { + enc1, err := NewAES256GCM("secret-password") require.NoError(t, err) // Confirm that we have a key-handle in the file extension - require.Equal(t, ".aes-256-ctr-16db3403", enc1.extension) + require.Equal(t, ".aes-256-gcm-16db3403", enc1.extension) // If algorithm and password are the same, the same key // handle (extension) should be produced every time - enc2, err := NewAES256CTR("secret-password") + enc2, err := NewAES256GCM("secret-password") require.NoError(t, err) require.Equal(t, enc1.extension, enc2.extension) } diff --git a/store.go b/store.go index c63142e..6c8fd9b 100644 --- a/store.go +++ b/store.go @@ -89,7 +89,7 @@ type StoreOptions struct { Uncompressed bool `json:"uncompressed"` // Store encryption settings. Currently supported algorithms are xchacha20-poly1305 (default) - // aes-256-gcm, and aes-256-ctr. + // and aes-256-gcm. 
Encryption bool `json:"encryption,omitempty"` EncryptionAlgorithm string `json:"encryption-algorithm,omitempty"` EncryptionPassword string `json:"encryption-password,omitempty"` @@ -122,12 +122,6 @@ func (o StoreOptions) StorageConverters() ([]converter, error) { return nil, err } c = append(c, enc) - case "aes-256-ctr": - enc, err := NewAES256CTR(o.EncryptionPassword) - if err != nil { - return nil, err - } - c = append(c, enc) default: return nil, fmt.Errorf("unsupported encryption algorithm %q", o.EncryptionAlgorithm) }