Skip to content

Commit d0f47aa

Browse files
byahn0996 authored and facebook-github-bot committed
FixedSizeIndex: Handling non-existing key case
Summary: With FixedSizeIndex, there are more possibilities for false positives. For a non-existing item, it may make a best effort to find it, and may return a false-positive PackedItemRecord instance. But when we are sure that the item doesn't really exist, it's better to return "it's not there". To simply indicate that it doesn't exist, I changed recordRef() to recordPtr() in ExclusiveLockedBucket and SharedLockedBucket. Those members were already of pointer type within the structures, so there's no increased vulnerability from using the pointer type directly. Reviewed By: rlyerly Differential Revision: D77402423 fbshipit-source-id: ed65de8f94f724cbc86835320c0aac6b6de47d26
1 parent cb492a4 commit d0f47aa

File tree

2 files changed

+77
-54
lines changed

2 files changed

+77
-54
lines changed

cachelib/navy/block_cache/FixedSizeIndex.cpp

Lines changed: 29 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -53,12 +53,12 @@ Index::LookupResult FixedSizeIndex::lookup(uint64_t key) {
5353
// also not sure if bumping up hit count with couldExist() makes sense.
5454
ExclusiveLockedBucket elb{key, *this, false};
5555

56-
if (elb.recordRef().isValid()) {
56+
if (elb.isValidRecord()) {
5757
return LookupResult(true,
58-
ItemRecord(elb.recordRef().address,
59-
elb.recordRef().getSizeHint(),
58+
ItemRecord(elb.recordPtr()->address,
59+
elb.recordPtr()->getSizeHint(),
6060
0, /* totalHits */
61-
elb.recordRef().bumpCurHits()));
61+
elb.recordPtr()->bumpCurHits()));
6262
}
6363

6464
return {};
@@ -67,12 +67,12 @@ Index::LookupResult FixedSizeIndex::lookup(uint64_t key) {
6767
Index::LookupResult FixedSizeIndex::peek(uint64_t key) const {
6868
SharedLockedBucket slb{key, *this};
6969

70-
if (slb.recordRef().isValid()) {
70+
if (slb.isValidRecord()) {
7171
return LookupResult(true,
72-
ItemRecord(slb.recordRef().address,
73-
slb.recordRef().getSizeHint(),
72+
ItemRecord(slb.recordPtr()->address,
73+
slb.recordPtr()->getSizeHint(),
7474
0, /* totalHits */
75-
slb.recordRef().info.curHits));
75+
slb.recordPtr()->info.curHits));
7676
}
7777

7878
return {};
@@ -84,18 +84,19 @@ Index::LookupResult FixedSizeIndex::insert(uint64_t key,
8484
LookupResult lr;
8585
ExclusiveLockedBucket elb{key, *this, true};
8686

87-
if (elb.recordRef().isValid()) {
87+
XDCHECK(elb.bucketExist());
88+
if (elb.isValidRecord()) {
8889
lr = LookupResult(true,
89-
ItemRecord(elb.recordRef().address,
90-
elb.recordRef().getSizeHint(),
90+
ItemRecord(elb.recordPtr()->address,
91+
elb.recordPtr()->getSizeHint(),
9192
0, /* totalHits */
92-
elb.recordRef().info.curHits));
93+
elb.recordPtr()->info.curHits));
9394
} else {
9495
++elb.validBucketCntRef();
9596
}
9697
// TODO: need to combine this two ops into one to make sure updateDistInfo()
9798
// part is not missed
98-
elb.recordRef() = PackedItemRecord{address, sizeHint, /* currentHits */ 0};
99+
*elb.recordPtr() = PackedItemRecord{address, sizeHint, /* currentHits */ 0};
99100
elb.updateDistInfo(key, *this);
100101

101102
return lr;
@@ -106,9 +107,9 @@ bool FixedSizeIndex::replaceIfMatch(uint64_t key,
106107
uint32_t oldAddress) {
107108
ExclusiveLockedBucket elb{key, *this, false};
108109

109-
if (elb.recordRef().address == oldAddress) {
110-
elb.recordRef().address = newAddress;
111-
elb.recordRef().info.curHits = 0;
110+
if (elb.isValidRecord() && elb.recordPtr()->address == oldAddress) {
111+
elb.recordPtr()->address = newAddress;
112+
elb.recordPtr()->info.curHits = 0;
112113
return true;
113114
}
114115
return false;
@@ -117,27 +118,29 @@ bool FixedSizeIndex::replaceIfMatch(uint64_t key,
117118
Index::LookupResult FixedSizeIndex::remove(uint64_t key) {
118119
ExclusiveLockedBucket elb{key, *this, false};
119120

120-
if (elb.recordRef().isValid()) {
121-
LookupResult lr{true, ItemRecord(elb.recordRef().address,
122-
elb.recordRef().getSizeHint(),
121+
if (elb.isValidRecord()) {
122+
LookupResult lr{true, ItemRecord(elb.recordPtr()->address,
123+
elb.recordPtr()->getSizeHint(),
123124
0, /* totalHits */
124-
elb.recordRef().info.curHits)};
125+
elb.recordPtr()->info.curHits)};
125126

126127
XDCHECK(elb.validBucketCntRef() > 0);
127128
--elb.validBucketCntRef();
128-
elb.recordRef() = PackedItemRecord{};
129+
*elb.recordPtr() = PackedItemRecord{};
129130
return lr;
130131
}
131132

132-
elb.recordRef() = PackedItemRecord{};
133+
if (elb.bucketExist()) {
134+
*elb.recordPtr() = PackedItemRecord{};
135+
}
133136
return {};
134137
}
135138

136139
bool FixedSizeIndex::removeIfMatch(uint64_t key, uint32_t address) {
137140
ExclusiveLockedBucket elb{key, *this, false};
138141

139-
if (elb.recordRef().address == address) {
140-
elb.recordRef() = PackedItemRecord{};
142+
if (elb.isValidRecord() && elb.recordPtr()->address == address) {
143+
*elb.recordPtr() = PackedItemRecord{};
141144

142145
XDCHECK(elb.validBucketCntRef() > 0);
143146
--elb.validBucketCntRef();
@@ -264,8 +267,8 @@ void FixedSizeIndex::setHitsTestOnly(uint64_t key,
264267
uint8_t totalHits) {
265268
ExclusiveLockedBucket elb{key, *this, false};
266269

267-
if (elb.recordRef().isValid()) {
268-
elb.recordRef().info.curHits =
270+
if (elb.isValidRecord()) {
271+
elb.recordPtr()->info.curHits =
269272
PackedItemRecord::truncateCurHits(currentHits);
270273
XLOGF(INFO,
271274
"setHitsTestOnly() for {}. totalHits {} was discarded in "

cachelib/navy/block_cache/FixedSizeIndex.h

Lines changed: 48 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -288,13 +288,18 @@ class FixedSizeIndex : public Index {
288288
void initialize();
289289

290290
// Random prime numbers for the distance for next bucket slot to use.
291-
static constexpr uint8_t kNextBucketOffset[4] = {0, 23, 61, 97};
291+
static constexpr std::array<uint8_t, 4> kNextBucketOffset{0, 23, 61, 97};
292+
// This offset will be used to indicate there's no valid bucket slot matching
293+
// the given key
294+
static constexpr uint8_t kInvalidBucketSlotOffset = 0xff;
295+
// This bucket id will be used to indicate there's no valid bucket for the key
296+
static constexpr uint64_t kInvalidBucketId = 0xffffffffffffffff;
292297

293298
uint8_t decideBucketOffset(uint64_t bid, uint64_t key) const {
294299
auto mid = mutexId(bid);
295300

296301
// Check if there's already one matching
297-
for (auto i = 0; i < 4; i++) {
302+
for (size_t i = 0; i < kNextBucketOffset.size(); i++) {
298303
auto curBid = calcBucketId(bid, i);
299304
// Make sure we don't go across the mutex boundary
300305
XDCHECK(mutexId(curBid) == mid) << bid << " " << i << " " << curBid;
@@ -307,7 +312,7 @@ class FixedSizeIndex : public Index {
307312
}
308313

309314
// No match. Find the empty one
310-
for (auto i = 0; i < 4; i++) {
315+
for (size_t i = 0; i < kNextBucketOffset.size(); i++) {
311316
auto curBid = calcBucketId(bid, i);
312317
// Make sure we don't go across the mutex boundary
313318
XDCHECK(mutexId(curBid) == mid) << bid << " " << i << " " << curBid;
@@ -324,7 +329,7 @@ class FixedSizeIndex : public Index {
324329
uint8_t checkBucketOffset(uint64_t bid, uint64_t key) const {
325330
auto mid = mutexId(bid);
326331

327-
for (auto i = 1; i < 4; i++) {
332+
for (size_t i = 0; i < kNextBucketOffset.size(); i++) {
328333
auto curBid = calcBucketId(bid, i);
329334
// Make sure we don't go across the mutex boundary
330335
XDCHECK(mutexId(curBid) == mid) << bid << " " << i << " " << curBid;
@@ -335,7 +340,20 @@ class FixedSizeIndex : public Index {
335340
return i;
336341
}
337342
}
338-
return 0;
343+
return kInvalidBucketSlotOffset;
344+
}
345+
346+
// This helper will get the proper bucket id and record entry
347+
// Return value : The pair of <Bucket id, pointer to the record>
348+
std::pair<uint64_t, PackedItemRecord*> getBucket(uint64_t orgBid,
349+
uint8_t offset) const {
350+
if (offset != kInvalidBucketSlotOffset) {
351+
auto bid = calcBucketId(orgBid, offset);
352+
return std::make_pair(bid, &ht_[bid]);
353+
} else {
354+
// There's no bucket for the given key
355+
return std::make_pair(kInvalidBucketId, nullptr);
356+
}
339357
}
340358

341359
// Updates hits information of a key.
@@ -366,6 +384,7 @@ class FixedSizeIndex : public Index {
366384
// We don't want to go across the mutex boundary, so if it goes beyond that,
367385
// it will wrap around and go back to the beginning of current mutex
368386
// boundary
387+
XDCHECK(offset < kNextBucketOffset.size()) << offset;
369388
return (bid / numBucketsPerMutex_) * numBucketsPerMutex_ +
370389
((bid + kNextBucketOffset[offset]) % numBucketsPerMutex_);
371390
}
@@ -388,8 +407,8 @@ class FixedSizeIndex : public Index {
388407

389408
// A helper class for exclusive locked access to a bucket.
390409
// It will lock the proper mutex with the given key when it's created.
391-
// recordRef() and validBucketCntRef() will return the record and
392-
// valid bucket count with exclusively locked bucket reference. Locked
410+
// recordPtr() and validBucketCntRef() will return the record and
411+
// valid bucket count with exclusively locked bucket. Locked
393412
// mutex will be released when it's destroyed.
394413
class ExclusiveLockedBucket {
395414
public:
@@ -399,59 +418,60 @@ class FixedSizeIndex : public Index {
399418
: bid_(index.bucketId(key)),
400419
mid_{index.mutexId(bid_)},
401420
lg_{index.mutex_[mid_]},
402-
record_{&index.ht_[bid_]},
403421
validBuckets_{index.validBucketsPerMutex_[mid_]} {
404422
auto offset = alloc ? index.decideBucketOffset(bid_, key)
405423
: index.checkBucketOffset(bid_, key);
406-
if (offset != 0) {
407-
bid_ = index.calcBucketId(bid_, offset);
408-
record_ = &index.ht_[bid_];
409-
bucketOffset_ = offset;
410-
}
424+
std::tie(bid_, record_) = index.getBucket(bid_, offset);
425+
bucketOffset_ = offset;
411426
}
412427

413-
PackedItemRecord& recordRef() { return *record_; }
428+
PackedItemRecord* recordPtr() { return record_; }
414429
size_t& validBucketCntRef() { return validBuckets_; }
415430
void updateDistInfo(uint64_t key, FixedSizeIndex& index) {
416-
index.bucketDistInfo_.updateBucketFillInfo(
417-
bid_, bucketOffset_, index.partialKeyBits(key));
431+
if (bucketOffset_ != kInvalidBucketSlotOffset) {
432+
index.bucketDistInfo_.updateBucketFillInfo(
433+
bid_, bucketOffset_, index.partialKeyBits(key));
434+
}
435+
}
436+
bool isValidRecord() const {
437+
return (record_ != nullptr && record_->isValid());
418438
}
439+
bool bucketExist() const { return record_ != nullptr; }
419440

420441
private:
421442
uint64_t bid_;
422443
uint64_t mid_;
423444
std::lock_guard<SharedMutex> lg_;
424-
PackedItemRecord* record_;
425445
size_t& validBuckets_;
426-
uint8_t bucketOffset_{0};
446+
PackedItemRecord* record_{};
447+
uint8_t bucketOffset_{kInvalidBucketSlotOffset};
427448
};
428449

429450
// A helper class for shared locked access to a bucket.
430451
// It will lock the proper mutex with the given key when it's created.
431-
// recordRef() will return the record with shared locked bucket reference.
452+
// recordPtr() will return the record with shared locked bucket.
432453
// Locked mutex will be released when it's destroyed.
433454
class SharedLockedBucket {
434455
public:
435456
explicit SharedLockedBucket(uint64_t key, const FixedSizeIndex& index)
436457
: bid_(index.bucketId(key)),
437458
mid_{index.mutexId(bid_)},
438-
lg_{index.mutex_[mid_]},
439-
record_{&index.ht_[bid_]} {
459+
lg_{index.mutex_[mid_]} {
440460
// check next bucket if it should be used
441-
auto offset = (index.checkBucketOffset(bid_, key));
442-
if (offset != 0) {
443-
bid_ = index.calcBucketId(bid_, offset);
444-
record_ = &index.ht_[bid_];
445-
}
461+
std::tie(bid_, record_) =
462+
index.getBucket(bid_, index.checkBucketOffset(bid_, key));
446463
}
447464

448-
const PackedItemRecord& recordRef() const { return *record_; }
465+
const PackedItemRecord* recordPtr() const { return record_; }
466+
bool isValidRecord() const {
467+
return (record_ != nullptr && record_->isValid());
468+
}
449469

450470
private:
451471
uint64_t bid_;
452472
uint64_t mid_;
453473
std::shared_lock<SharedMutex> lg_;
454-
const PackedItemRecord* record_;
474+
const PackedItemRecord* record_{};
455475
};
456476

457477
// For unit tests private member access

0 commit comments

Comments
 (0)