13 changes: 0 additions & 13 deletions bolt_windows.go
@@ -68,19 +68,6 @@ func mmap(db *DB, sz int) error {
var sizelo, sizehi uint32

if !db.readOnly {
- if db.MaxSize > 0 && sz > db.MaxSize {
- // The max size only limits future writes; however, we don't block opening
- // and mapping the database if it already exceeds the limit.
- fileSize, err := db.fileSize()
- if err != nil {
- return fmt.Errorf("could not check existing db file size: %s", err)
- }

- if sz > fileSize {
- return errors.ErrMaxSizeReached
- }
- }

// Truncate the database to the size of the mmap.
if err := db.file.Truncate(int64(sz)); err != nil {
return fmt.Errorf("truncate: %s", err)
26 changes: 17 additions & 9 deletions db.go
@@ -1166,13 +1166,26 @@ func (db *DB) allocate(txid common.Txid, count int) (*common.Page, error) {
p.SetId(db.rwtx.meta.Pgid())
var minsz = int((p.Id()+common.Pgid(count))+1) * db.pageSize
if minsz >= db.datasz {
- if err := db.mmap(minsz); err != nil {
- if err == berrors.ErrMaxSizeReached {
- return nil, err
+ if db.MaxSize > 0 {
+ // this calculation matches the calculation in grow
+ // however, I don't quite understand it. Why is the allocation increment added to the size required,
+ // rather than the size required rounded up to the next multiple of the allocation increment?
Comment on lines +1171 to +1172

Member:

It's over-allocation, to amortize the cost of truncate and fsync. Refer to a122e1c.

Contributor Author:

My issue was mostly that what I think happens here is that if you ask for 1 byte, but the alloc size is 10 bytes, it actually allocates 11 bytes. Shouldn't it be 10?

Member:

Yes, as mentioned above, it's a bit of over-allocation. By default it's 16 MB at most. It shouldn't be a big problem. The logic has been there for almost a decade; let's keep it as it is unless you have a very strong justification.
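To make the arithmetic in this thread concrete, here is a minimal standalone sketch (hypothetical sizes, not part of the PR) of the rounding used in the new allocate code above: once the requested size reaches AllocSize, a full extra increment is added on top of it rather than the request being rounded up to the next multiple.

```go
package main

import "fmt"

// nextAllocSize mirrors the calculation in the allocate hunk above:
// requests below allocSize jump straight to allocSize; larger requests
// get a whole extra allocSize added instead of being rounded up.
func nextAllocSize(minsz, allocSize int) int {
	sz := minsz
	if sz < allocSize {
		sz = allocSize
	} else {
		sz += allocSize
	}
	return sz
}

func main() {
	fmt.Println(nextAllocSize(1, 10))  // 10 — below the increment, allocate one full increment
	fmt.Println(nextAllocSize(11, 10)) // 21 — over-allocates past the next multiple (20)
}
```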

+ nextAllocSize := minsz
+ if nextAllocSize < db.AllocSize {
+ nextAllocSize = db.AllocSize
} else {
- return nil, fmt.Errorf("mmap allocate error: %s", err)
+ nextAllocSize += db.AllocSize
}

+ if nextAllocSize > db.MaxSize {
+ db.Logger().Errorf("[GOOS: %s, GOARCH: %s] maximum db size reached, size: %d, db.MaxSize: %d", runtime.GOOS, runtime.GOARCH, minsz, db.MaxSize)
+ return nil, berrors.ErrMaxSizeReached
+ }
}

+ if err := db.mmap(minsz); err != nil {
+ return nil, fmt.Errorf("mmap allocate error: %s", err)
+ }
}

// Move the page id high water mark.
@@ -1203,11 +1216,6 @@ func (db *DB) grow(sz int) error {
sz += db.AllocSize
}

- if !db.readOnly && db.MaxSize > 0 && sz > db.MaxSize {
- lg.Errorf("[GOOS: %s, GOARCH: %s] maximum db size reached, size: %d, db.MaxSize: %d", runtime.GOOS, runtime.GOARCH, sz, db.MaxSize)
- return berrors.ErrMaxSizeReached
- }

// Truncate and fsync to ensure file size metadata is flushed.
// https://github.com/boltdb/bolt/issues/284
if !db.NoGrowSync && !db.readOnly {
36 changes: 23 additions & 13 deletions db_test.go
@@ -1483,11 +1483,11 @@ func TestDBUnmap(t *testing.T) {
db.DB = nil
}

- // Convenience function for inserting a bunch of keys with 1000 byte values
- func fillDBWithKeys(db *btesting.DB, numKeys int) error {
+ // Convenience function for inserting a bunch of keys with values of a specific size (in bytes)
+ func fillDBWithKeys(db *btesting.DB, numKeys, valueSize int) error {
return db.Fill([]byte("data"), 1, numKeys,
func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) },
- func(tx int, k int) []byte { return make([]byte, 1000) },
+ func(tx int, k int) []byte { return make([]byte, valueSize) },
)
}

@@ -1543,15 +1543,19 @@ func TestDB_MaxSizeNotExceeded(t *testing.T) {
path := db.Path()

// The data file should be 4 MiB now (expanded once from zero).
- // It should have space for roughly 16 more entries before trying to grow
- // Keep inserting until grow is required
- err := fillDBWithKeys(db, 100)
+ // It should have space for roughly one more entry with a 100 kB value before trying to grow.
+ // This next insert should be too big.
+ err := fillDBWithKeys(db, 2, 100000)
assert.ErrorIs(t, err, berrors.ErrMaxSizeReached)

newSz := fileSize(path)
require.Greater(t, newSz, int64(0), "unexpected new file size: %d", newSz)
assert.LessOrEqual(t, newSz, int64(db.MaxSize), "The size of the data file should not exceed db.MaxSize")

+ // Now try another, small write that should still succeed without exceeding the max size.
+ err = fillDBWithKeys(db, 1, 1)
+ assert.NoError(t, err, "Adding an entry after a failed, oversized write should not error")

err = db.Close()
require.NoError(t, err, "Closing the re-opened database should succeed")
})
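As background for these tests, a hedged sketch of how a caller might use the size cap and react to the error it produces. The MaxSize option and ErrMaxSizeReached error come from this PR; the import paths, file name, and sizes below are assumptions for illustration, not part of the change.

```go
package main

import (
	"errors"
	"log"

	bolt "go.etcd.io/bbolt"
	berrors "go.etcd.io/bbolt/errors"
)

func main() {
	// MaxSize caps future growth of the data file (in bytes); 0 means no limit.
	db, err := bolt.Open("app.db", 0600, &bolt.Options{MaxSize: 4 * 1024 * 1024})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("data"))
		if err != nil {
			return err
		}
		return b.Put([]byte("k"), make([]byte, 100_000)) // a 100 kB value
	})
	if errors.Is(err, berrors.ErrMaxSizeReached) {
		// The oversized write is rejected; the file stays at or below MaxSize,
		// and smaller writes can still succeed afterwards.
		log.Println("db full:", err)
	} else if err != nil {
		log.Fatal(err)
	}
}
```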
@@ -1567,7 +1571,7 @@ func TestDB_MaxSizeExceededCanOpen(t *testing.T) {
path := db.Path()

// Insert a reasonable amount of data below the max size.
- err := fillDBWithKeys(db, 2000)
+ err := fillDBWithKeys(db, 2000, 1000)
require.NoError(t, err, "fillDbWithKeys should succeed")

err = db.Close()
@@ -1625,7 +1629,7 @@ func TestDB_MaxSizeExceededCanOpenWithHighMmap(t *testing.T) {
}

// Ensure that when InitialMmapSize is above the limit, opening a database
- // that is beyond the maximum size fails in Windows.
+ // that is beyond the maximum size does not grow the db on Windows.
// In Windows, the file must be expanded to the mmap initial size.
// https://github.com/etcd-io/bbolt/issues/928
func TestDB_MaxSizeExceededDoesNotGrow(t *testing.T) {
Contributor Author:

@ahrtr I've updated this test to demonstrate the issue on Windows I was talking about. When you set InitialMmapSize to be greater than the actual size of the file, the file must be expanded to map it that way.

The assertion on line 1666 fails because newSize is exactly double expandedSize (or, exactly equal to InitialMmapSize).

Member:

I see.

I think we need to disable InitialMmapSize for Windows; in other words, we don't use it on Windows.
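A minimal sketch (hypothetical, not part of the PR) of the behaviour described in this thread: because the Windows mmap path truncates the file up to the mapping size, opening with an InitialMmapSize larger than the current file grows the file on disk, whereas on Linux the on-disk size is left alone. The file name and doubling factor below are assumptions for illustration.

```go
package main

import (
	"log"
	"os"

	bolt "go.etcd.io/bbolt"
)

func main() {
	const path = "win.db" // assumes an existing bbolt data file
	before, err := os.Stat(path)
	if err != nil {
		log.Fatal(err)
	}

	// Ask for a mapping twice as large as the current file.
	db, err := bolt.Open(path, 0600, &bolt.Options{
		InitialMmapSize: int(before.Size()) * 2,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	after, err := os.Stat(path)
	if err != nil {
		log.Fatal(err)
	}
	// On Windows the file must be expanded to the mmap size before it can be
	// mapped, so the size roughly doubles; on Linux it keeps its old size.
	log.Printf("file size before: %d, after: %d", before.Size(), after.Size())
}
```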

@@ -1643,17 +1647,23 @@ func TestDB_MaxSizeExceededDoesNotGrow(t *testing.T) {

// The data file should be 4 MiB now (expanded once from zero).
minimumSizeForTest := int64(1024 * 1024)
- newSz := fileSize(path)
- assert.GreaterOrEqual(t, newSz, minimumSizeForTest, "unexpected new file size: %d. Expected at least %d", newSz, minimumSizeForTest)
+ expandedSize := fileSize(path)
+ assert.GreaterOrEqual(t, expandedSize, minimumSizeForTest, "unexpected new file size: %d. Expected at least %d", expandedSize, minimumSizeForTest)

// Now try to re-open the database with an extremely small max size and
// an initial mmap size to be greater than the actual file size, forcing an illegal grow on open
t.Logf("Reopening bbolt DB at: %s", path)
- _, err = btesting.OpenDBWithOption(t, path, &bolt.Options{
+ db, err = btesting.OpenDBWithOption(t, path, &bolt.Options{
MaxSize: 1,
- InitialMmapSize: int(newSz) * 2,
+ InitialMmapSize: int(expandedSize) * 2,
})
- assert.Error(t, err, "Opening the DB with InitialMmapSize > MaxSize should cause an error on Windows")
+ require.NoError(t, err, "Opening the DB with InitialMmapSize > MaxSize should not cause an error, because it is not a write operation")

+ db.MustClose()

+ newSize := fileSize(path)

+ assert.Equal(t, expandedSize, newSize, "Expected the file size to stay the same when MaxSize is set impossibly low")
}

func TestDB_HugeValue(t *testing.T) {