Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ Changelog for NeoFS Node
### Added

### Fixed
- Potential payload overflow on getting full object from combined FSTree file (#3801)

### Changed

Expand Down
36 changes: 36 additions & 0 deletions pkg/local_object_storage/blobstor/fstree/fstree_test.go
Original file line number Diff line number Diff line change
@@ -1,12 +1,16 @@
package fstree

import (
"bytes"
"io"
"testing"
"testing/iotest"

"github.com/nspcc-dev/neofs-node/internal/testutil"
"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/compression"
apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status"
"github.com/nspcc-dev/neofs-sdk-go/object"
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test"
objecttest "github.com/nspcc-dev/neofs-sdk-go/object/test"
"github.com/stretchr/testify/require"
Expand Down Expand Up @@ -88,3 +92,35 @@ func testGetRangeStream(t *testing.T, fst *FSTree) {
_, err = fst.GetRangeStream(addr, 1, pldLen-1)
require.ErrorIs(t, err, apistatus.ErrObjectNotFound)
}

// TestFSTree_PutBatch stores several objects via PutBatch and verifies that
// each one can be streamed back from the combined file with an intact header
// and a payload of exactly the declared size.
func TestFSTree_PutBatch(t *testing.T) {
	fst := setupFSTree(t)

	const pldLen = object.MaxHeaderLen

	const objCount = 3
	objs := make([]object.Object, objCount)
	batch := make(map[oid.Address][]byte, objCount)
	for i := range objs {
		obj := objecttest.Object()
		obj.SetPayloadSize(pldLen)
		obj.SetPayload(testutil.RandByteSlice(pldLen))

		objs[i] = obj
		batch[obj.Address()] = obj.Marshal()
	}

	require.NoError(t, fst.PutBatch(batch))

	for i := range objs {
		addr := objs[i].Address()

		hdr, stream, err := fst.GetStream(addr)
		require.NoError(t, err)
		t.Cleanup(func() { stream.Close() })

		// Header returned by GetStream must match the stored object sans payload.
		require.EqualValues(t, objs[i].CutPayload(), hdr)

		// note: iotest.TestReader does not fit due to overridden io.Seeker interface
		payload, err := io.ReadAll(stream)
		require.NoError(t, err)
		require.Len(t, payload, pldLen)
		require.True(t, bytes.Equal(objs[i].Payload(), payload))
	}
}
28 changes: 22 additions & 6 deletions pkg/local_object_storage/blobstor/fstree/head.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ import (
"fmt"
"io"
"io/fs"
"math"
"os"

"github.com/klauspost/compress/zstd"
Expand Down Expand Up @@ -63,12 +64,12 @@ func (t *FSTree) extractHeaderAndStream(id oid.ID, f *os.File) (*object.Object,
return nil, f, err
}
if n < combinedDataOff {
return t.readHeaderAndPayload(f, buf[:n])
return t.readHeaderAndPayload(f, buf[:n], false)
}

thisOID, l := parseCombinedPrefix(buf)
if thisOID == nil {
return t.readHeaderAndPayload(f, buf[:n])
return t.readHeaderAndPayload(f, buf[:n], false)
}

offset := combinedDataOff
Expand All @@ -81,7 +82,7 @@ func (t *FSTree) extractHeaderAndStream(id oid.ID, f *os.File) (*object.Object,
return nil, f, fmt.Errorf("read up to size: %w", err)
}
}
return t.readHeaderAndPayload(f, buf[offset:size])
return t.readHeaderAndPayload(f, buf[offset:size], true)
}

offset += int(l)
Expand Down Expand Up @@ -115,7 +116,7 @@ func (t *FSTree) extractHeaderAndStream(id oid.ID, f *os.File) (*object.Object,

// readHeaderAndPayload reads an object header from the file and returns reader for payload.
// This function takes ownership of the io.ReadCloser and will close it if it does not return it.
func (t *FSTree) readHeaderAndPayload(f io.ReadCloser, initial []byte) (*object.Object, io.ReadSeekCloser, error) {
func (t *FSTree) readHeaderAndPayload(f io.ReadCloser, initial []byte, combined bool) (*object.Object, io.ReadSeekCloser, error) {
var err error
if len(initial) < object.MaxHeaderLen {
_ = f.Close()
Expand All @@ -139,13 +140,13 @@ func (t *FSTree) readHeaderAndPayload(f io.ReadCloser, initial []byte) (*object.
}, nil
}

return t.readUntilPayload(f, initial)
return t.readUntilPayload(f, initial, combined)
}

// readUntilPayload reads an object from the file until the payload field is reached
// and returns the object along with a reader for the remaining data.
// This function takes ownership of the io.ReadCloser and will close it if it does not return it.
func (t *FSTree) readUntilPayload(f io.ReadCloser, initial []byte) (*object.Object, io.ReadSeekCloser, error) {
func (t *FSTree) readUntilPayload(f io.ReadCloser, initial []byte, combined bool) (*object.Object, io.ReadSeekCloser, error) {
reader := f

if t.IsCompressed(initial) {
Expand All @@ -170,6 +171,21 @@ func (t *FSTree) readUntilPayload(f io.ReadCloser, initial []byte) (*object.Obje
return nil, nil, fmt.Errorf("extract header and payload: %w", err)
}

if combined {
if full := obj.PayloadSize(); full > uint64(len(payloadPrefix)) {
rem := full - uint64(len(payloadPrefix))
Copy link
Contributor Author

@cthulhu-rider cthulhu-rider Feb 4, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

we could also use the length tag from the combined file as the limit, it is read anyway. This would also work when the actual and in-header payload lengths differ. In practice, such objects should not be

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

an additional argument can be #3783 and movement towards FSTree abstraction from the object structure

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, I'd prefer length from combined prefix to be used as the final limit. It'd probably also be much easier, you can pass limited reader from upper level function already (where you know the size exactly and give these lower level methods only the data they need).

if rem > math.MaxInt64 {
// we could give this control to the caller, but the case is purely theoretical
return nil, nil, fmt.Errorf("too big payload remainder %d bytes", rem)
}

return obj, &payloadReader{
Reader: io.MultiReader(bytes.NewReader(payloadPrefix), io.LimitReader(reader, int64(rem))),
close: reader.Close,
}, nil
}
}

return obj, &payloadReader{
Reader: io.MultiReader(bytes.NewReader(payloadPrefix), reader),
close: reader.Close,
Expand Down
Loading