From 5908cc2a5dd0fac467bde030148e599096969364 Mon Sep 17 00:00:00 2001
From: Leonard Lyubich
Date: Wed, 4 Feb 2026 12:45:12 +0300
Subject: [PATCH] sn/storage: Fix payload overflow in combined `FSTree.Get()`

Previously, the payload stream returned for an object stored in a
combined file was not bounded, so, to avoid exceeding the payload's
boundaries, every caller would have to perform checks on its side. It
is more natural to just read until EOF, and this is what happens now.

Signed-off-by: Leonard Lyubich
---
 CHANGELOG.md                       |  1 +
 .../blobstor/fstree/fstree_test.go | 36 +++++++++++++++++++
 .../blobstor/fstree/head.go        | 28 +++++++++++----
 3 files changed, 59 insertions(+), 6 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8c7600ee38..0c5c5269b7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,7 @@ Changelog for NeoFS Node
 ### Added
 
 ### Fixed
+- Potential payload overflow on getting full object from combined FSTree file (#3801)
 
 ### Changed
 
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_test.go
index 219a85519e..2a109bc22e 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_test.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_test.go
@@ -1,12 +1,16 @@
 package fstree
 
 import (
+	"bytes"
+	"io"
 	"testing"
 	"testing/iotest"
 
 	"github.com/nspcc-dev/neofs-node/internal/testutil"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/compression"
 	apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status"
+	"github.com/nspcc-dev/neofs-sdk-go/object"
+	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
 	oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test"
 	objecttest "github.com/nspcc-dev/neofs-sdk-go/object/test"
 	"github.com/stretchr/testify/require"
@@ -88,3 +92,35 @@ func testGetRangeStream(t *testing.T, fst *FSTree) {
 	_, err = fst.GetRangeStream(addr, 1, pldLen-1)
 	require.ErrorIs(t, err, apistatus.ErrObjectNotFound)
 }
+
+func TestFSTree_PutBatch(t *testing.T) {
+	fst := setupFSTree(t)
+
+	const pldLen = object.MaxHeaderLen
+
+	objs := make([]object.Object, 3)
+	batch := make(map[oid.Address][]byte)
+	for i := range objs {
+		objs[i] = objecttest.Object()
+		objs[i].SetPayloadSize(pldLen)
+		objs[i].SetPayload(testutil.RandByteSlice(pldLen))
+
+		batch[objs[i].Address()] = objs[i].Marshal()
+	}
+
+	require.NoError(t, fst.PutBatch(batch))
+
+	for i := range objs {
+		hdr, stream, err := fst.GetStream(objs[i].Address())
+		require.NoError(t, err)
+		t.Cleanup(func() { stream.Close() })
+
+		require.EqualValues(t, objs[i].CutPayload(), hdr)
+
+		// note: iotest.TestReader does not fit due to the overridden io.Seeker interface
+		b, err := io.ReadAll(stream)
+		require.NoError(t, err)
+		require.Len(t, b, pldLen)
+		require.True(t, bytes.Equal(objs[i].Payload(), b))
+	}
+}
diff --git a/pkg/local_object_storage/blobstor/fstree/head.go b/pkg/local_object_storage/blobstor/fstree/head.go
index d05db9ec80..52f549afae 100644
--- a/pkg/local_object_storage/blobstor/fstree/head.go
+++ b/pkg/local_object_storage/blobstor/fstree/head.go
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"io"
 	"io/fs"
+	"math"
 	"os"
 
 	"github.com/klauspost/compress/zstd"
@@ -63,12 +64,12 @@ func (t *FSTree) extractHeaderAndStream(id oid.ID, f *os.File) (*object.Object,
 		return nil, f, err
 	}
 	if n < combinedDataOff {
-		return t.readHeaderAndPayload(f, buf[:n])
+		return t.readHeaderAndPayload(f, buf[:n], false)
 	}
 
 	thisOID, l := parseCombinedPrefix(buf)
 	if thisOID == nil {
-		return t.readHeaderAndPayload(f, buf[:n])
+		return t.readHeaderAndPayload(f, buf[:n], false)
 	}
 
 	offset := combinedDataOff
@@ -81,7 +82,7 @@ func (t *FSTree) extractHeaderAndStream(id oid.ID, f *os.File) (*object.Object,
 				return nil, f, fmt.Errorf("read up to size: %w", err)
 			}
 		}
-		return t.readHeaderAndPayload(f, buf[offset:size])
+		return t.readHeaderAndPayload(f, buf[offset:size], true)
 	}
 
 	offset += int(l)
@@ -115,7 +116,7 @@ func (t *FSTree) extractHeaderAndStream(id oid.ID, f *os.File) (*object.Object,
 
 // readHeaderAndPayload reads an object header from the file and returns reader for payload.
 // This function takes ownership of the io.ReadCloser and will close it if it does not return it.
-func (t *FSTree) readHeaderAndPayload(f io.ReadCloser, initial []byte) (*object.Object, io.ReadSeekCloser, error) {
+func (t *FSTree) readHeaderAndPayload(f io.ReadCloser, initial []byte, combined bool) (*object.Object, io.ReadSeekCloser, error) {
 	var err error
 	if len(initial) < object.MaxHeaderLen {
 		_ = f.Close()
@@ -139,13 +140,13 @@ func (t *FSTree) readHeaderAndPayload(f io.ReadCloser, initial []byte) (*object.
 		}, nil
 	}
 
-	return t.readUntilPayload(f, initial)
+	return t.readUntilPayload(f, initial, combined)
 }
 
 // readUntilPayload reads an object from the file until the payload field is reached
 // and returns the object along with a reader for the remaining data.
 // This function takes ownership of the io.ReadCloser and will close it if it does not return it.
-func (t *FSTree) readUntilPayload(f io.ReadCloser, initial []byte) (*object.Object, io.ReadSeekCloser, error) {
+func (t *FSTree) readUntilPayload(f io.ReadCloser, initial []byte, combined bool) (*object.Object, io.ReadSeekCloser, error) {
 	reader := f
 
 	if t.IsCompressed(initial) {
@@ -170,6 +171,21 @@ func (t *FSTree) readUntilPayload(f io.ReadCloser, initial []byte) (*object.Obje
 		return nil, nil, fmt.Errorf("extract header and payload: %w", err)
 	}
 
+	if combined {
+		if full := obj.PayloadSize(); full > uint64(len(payloadPrefix)) {
+			rem := full - uint64(len(payloadPrefix))
+			if rem > math.MaxInt64 {
+				// we could give this control to the caller, but the case is purely theoretical
+				return nil, nil, fmt.Errorf("too big payload remainder %d bytes", rem)
+			}
+
+			return obj, &payloadReader{
+				Reader: io.MultiReader(bytes.NewReader(payloadPrefix), io.LimitReader(reader, int64(rem))),
+				close:  reader.Close,
+			}, nil
+		}
+	}
+
 	return obj, &payloadReader{
 		Reader: io.MultiReader(bytes.NewReader(payloadPrefix), reader),
 		close:  reader.Close,
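
Note: the essence of the fix is the `io.LimitReader` bound placed on the
remainder of the combined file. Below is a minimal standalone sketch of the
same technique, using only the Go standard library; the data and variable
names are illustrative only and not part of the patch. A payload prefix that
has already been buffered is stitched to a length-limited view of the
underlying reader, so `io.ReadAll` stops exactly at the payload boundary even
though the stream continues with the next object's data.

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func main() {
	// A "combined" stream: one object's payload followed by the next
	// object's data, as in a combined FSTree file.
	const payload = "hello, payload"
	combined := strings.NewReader(payload + "NEXT-OBJECT-DATA")

	// Pretend header parsing already consumed the first few payload
	// bytes into a prefix buffer (payloadPrefix in the patch).
	prefix := make([]byte, 5)
	if _, err := io.ReadFull(combined, prefix); err != nil {
		panic(err)
	}

	// Payload bytes still sitting in the underlying reader.
	rem := int64(len(payload) - len(prefix))

	// Without io.LimitReader the MultiReader would run to EOF of the
	// whole combined stream -- exactly the overflow the patch fixes.
	r := io.MultiReader(bytes.NewReader(prefix), io.LimitReader(combined, rem))

	got, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", got) // "hello, payload", no trailing data
}

Running it prints "hello, payload": the reader yields exactly len(payload)
bytes and then EOF, which is what the new combined branch of readUntilPayload
guarantees for callers of `FSTree.GetStream()`.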