Commit a60fc9a

craig[bot] committed with andrewbaptist, nvanbenschoten, and mgartner
118329: kvclient: misc range cache cleanups r=arulajmani a=andrewbaptist

Numerous small cleanups to the range cache. After these changes, the RangeCache and related classes have a much simpler interface, with some nice usability benefits:

1) CacheEntry is no longer public. This is good because it is intended to be immutable, and it is now easier to audit its usage.
2) Test-only methods are now named to clearly designate them as such. This will prevent new code from using them incorrectly.
3) Unused methods are removed from the interface.

118800: *: use new slice to array conversion syntax r=nvanbenschoten a=nvanbenschoten

Closes #110261.

This commit adopts the new slice-to-array conversion syntax introduced in Go 1.20. The new syntax is applicable in only one small part of the codebase, so it's a small change.

From https://tip.golang.org/doc/go1.20:

> Go 1.20 extends this to allow conversions from a slice to an array: given a slice x, [4]byte(x) can now be written instead of *(*[4]byte)(x).

Release note: None

118923: sql: reduce allocations in rowContainerIterator r=mgartner a=mgartner

In #111318 a shallow copy of rows was added to `rowContainerIterator.Next`. This copy is only necessary for one usage of the iterator, within `plpgsqlCursorHelper`. This commit moves the copy to the cursor helper, which reduces allocations for all other usages of the iterator.

Informs #117546

Release note: None

Co-authored-by: Andrew Baptist <[email protected]>
Co-authored-by: Nathan VanBenschoten <[email protected]>
Co-authored-by: Marcus Gartner <[email protected]>

4 parents 414670b + a94850c + 3116fdf + 6889c36, commit a60fc9a
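To illustrate the shape of the 118329 interface change, here is a minimal, self-contained sketch with toy types (not the real kvclient API): the old GetCached returned a nilable *CacheEntry with getter methods, while the new test-only accessor returns a plain value plus an error, as the test diffs below reflect.

package main

import (
	"errors"
	"fmt"
)

// RangeInfo stands in for roachpb.RangeInfo: a plain value with exported
// fields (Desc, Lease, ClosedTimestampPolicy in the real type), replacing
// the old *CacheEntry and its Desc()/Lease() getter methods.
type RangeInfo struct {
	Desc  string
	Lease string
}

type rangeCache struct {
	entries map[string]RangeInfo
}

// TestingGetCached mirrors the new accessor's contract: it returns a copy
// of the cached value and an error, instead of a nilable pointer into the
// cache, so test code cannot mutate the cache's immutable internal entry.
func (c *rangeCache) TestingGetCached(key string) (RangeInfo, error) {
	ri, ok := c.entries[key]
	if !ok {
		return RangeInfo{}, errors.New("no entry found in cache")
	}
	return ri, nil
}

func main() {
	c := &rangeCache{entries: map[string]RangeInfo{
		"table/1": {Desc: "[a, b)", Lease: "store 1"},
	}}
	// New style: check the error rather than a nil pointer.
	entry, err := c.TestingGetCached("table/1")
	if err != nil {
		panic(err)
	}
	fmt.Println(entry.Desc, entry.Lease) // fields, not getter methods
}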
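The Go 1.20 conversion adopted by 118800 can be demonstrated standalone. Both forms below copy the first four bytes of the slice into an array and panic at runtime if the slice is shorter than the array:

package main

import "fmt"

func main() {
	x := []byte{0xde, 0xad, 0xbe, 0xef, 0x01}

	// Pre-Go-1.20 spelling: convert to an array pointer (legal since
	// Go 1.17), then dereference to copy the first four bytes.
	viaPtr := *(*[4]byte)(x)

	// Go 1.20 spelling: convert the slice directly to an array value.
	// Both forms panic if len(x) < 4.
	direct := [4]byte(x)

	fmt.Println(viaPtr == direct) // true
}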
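The sql files touched by 118923 are not among the diffs shown below, but the idea generalizes. A hypothetical sketch, assuming toy stand-ins (iterator for rowContainerIterator, cursorHelper for plpgsqlCursorHelper; none of these are the real CockroachDB types): the per-row copy moves out of the shared iterator and into the one caller that retains rows.

package main

import "fmt"

type row []int

// iterator stands in for rowContainerIterator. Next reuses one scratch
// buffer instead of allocating a fresh row per call, so its result is
// only valid until the next call; most callers are fine with that.
type iterator struct {
	rows    []row
	idx     int
	scratch row
}

func (it *iterator) Next() (row, bool) {
	if it.idx >= len(it.rows) {
		return nil, false
	}
	it.scratch = append(it.scratch[:0], it.rows[it.idx]...)
	it.idx++
	return it.scratch, true
}

// cursorHelper stands in for plpgsqlCursorHelper: the one caller that
// retains rows across Next calls, so the copy now happens here instead
// of inside Next.
type cursorHelper struct {
	held []row
}

func (c *cursorHelper) fetch(it *iterator) bool {
	r, ok := it.Next()
	if !ok {
		return false
	}
	c.held = append(c.held, append(row(nil), r...)) // copy moved here
	return true
}

func main() {
	it := &iterator{rows: []row{{1, 2}, {3, 4}}}
	c := &cursorHelper{}
	for c.fetch(it) {
	}
	fmt.Println(c.held) // [[1 2] [3 4]]
}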

File tree

15 files changed (+354, -403 lines)

pkg/ccl/kvccl/kvfollowerreadsccl/followerreads_test.go

Lines changed: 21 additions & 21 deletions
@@ -814,14 +814,14 @@ func TestFollowerReadsWithStaleDescriptor(t *testing.T) {
 	n1.QueryRow(t, `SELECT id from system.namespace WHERE name='test'`).Scan(&tableID)
 	tablePrefix := keys.MustAddr(keys.SystemSQLCodec.TablePrefix(tableID))
 	n4Cache := tc.Server(3).DistSenderI().(*kvcoord.DistSender).RangeDescriptorCache()
-	entry := n4Cache.GetCached(ctx, tablePrefix, false /* inverted */)
-	require.NotNil(t, entry)
-	require.False(t, entry.Lease().Empty())
-	require.Equal(t, roachpb.StoreID(1), entry.Lease().Replica.StoreID)
+	entry, err := n4Cache.TestingGetCached(ctx, tablePrefix, false /* inverted */)
+	require.NoError(t, err)
+	require.False(t, entry.Lease.Empty())
+	require.Equal(t, roachpb.StoreID(1), entry.Lease.Replica.StoreID)
 	require.Equal(t, []roachpb.ReplicaDescriptor{
 		{NodeID: 1, StoreID: 1, ReplicaID: 1},
 		{NodeID: 2, StoreID: 2, ReplicaID: 2},
-	}, entry.Desc().Replicas().Descriptors())
+	}, entry.Desc.Replicas().Descriptors())
 
 	// Remove the follower and add a new non-voter to n3. n2 will no longer have a
 	// replica.
@@ -837,19 +837,19 @@ func TestFollowerReadsWithStaleDescriptor(t *testing.T) {
 	rec := <-recCh
 	require.False(t, kv.OnlyFollowerReads(rec), "query was served through follower reads: %s", rec)
 	// Check that the cache was properly updated.
-	entry = n4Cache.GetCached(ctx, tablePrefix, false /* inverted */)
-	require.NotNil(t, entry)
-	require.False(t, entry.Lease().Empty())
-	require.Equal(t, roachpb.StoreID(1), entry.Lease().Replica.StoreID)
+	entry, err = n4Cache.TestingGetCached(ctx, tablePrefix, false /* inverted */)
+	require.NoError(t, err)
+	require.False(t, entry.Lease.Empty())
+	require.Equal(t, roachpb.StoreID(1), entry.Lease.Replica.StoreID)
 	require.Equal(t, []roachpb.ReplicaDescriptor{
 		{NodeID: 1, StoreID: 1, ReplicaID: 1},
 		{NodeID: 3, StoreID: 3, ReplicaID: 3, Type: roachpb.NON_VOTER},
-	}, entry.Desc().Replicas().Descriptors())
+	}, entry.Desc.Replicas().Descriptors())
 
 	// Make a note of the follower reads metric on n3. We'll check that it was
 	// incremented.
 	var followerReadsCountBefore int64
-	err := tc.Servers[2].GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error {
+	err = tc.Servers[2].GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error {
 		followerReadsCountBefore = s.Metrics().FollowerReadsCount.Count()
 		return nil
 	})
@@ -880,14 +880,14 @@ func TestFollowerReadsWithStaleDescriptor(t *testing.T) {
 	n3 := sqlutils.MakeSQLRunner(tc.Conns[2])
 	n3.Exec(t, "SELECT * from test WHERE k=1")
 	n3Cache := tc.Server(2).DistSenderI().(*kvcoord.DistSender).RangeDescriptorCache()
-	entry = n3Cache.GetCached(ctx, tablePrefix, false /* inverted */)
-	require.NotNil(t, entry)
-	require.False(t, entry.Lease().Empty())
-	require.Equal(t, roachpb.StoreID(1), entry.Lease().Replica.StoreID)
+	entry, err = n3Cache.TestingGetCached(ctx, tablePrefix, false /* inverted */)
+	require.NoError(t, err)
+	require.False(t, entry.Lease.Empty())
+	require.Equal(t, roachpb.StoreID(1), entry.Lease.Replica.StoreID)
 	require.Equal(t, []roachpb.ReplicaDescriptor{
 		{NodeID: 1, StoreID: 1, ReplicaID: 1},
 		{NodeID: 3, StoreID: 3, ReplicaID: 3, Type: roachpb.NON_VOTER},
-	}, entry.Desc().Replicas().Descriptors())
+	}, entry.Desc.Replicas().Descriptors())
 
 	// Enable DistSQL so that we have a distributed plan with a single flow on
 	// n3 (local plans ignore the misplanned ranges).
@@ -1129,15 +1129,15 @@ func TestSecondaryTenantFollowerReadsRouting(t *testing.T) {
 	tenantSQL.Exec(t, `SELECT * FROM t.test WHERE k = 1`)
 	tablePrefix := keys.MustAddr(codec.TenantPrefix())
 	cache := tenants[gatewayNode].DistSenderI().(*kvcoord.DistSender).RangeDescriptorCache()
-	entry := cache.GetCached(ctx, tablePrefix, false /* inverted */)
-	require.NotNil(t, entry)
-	require.False(t, entry.Lease().Empty())
-	require.Equal(t, roachpb.StoreID(1), entry.Lease().Replica.StoreID)
+	entry, err := cache.TestingGetCached(ctx, tablePrefix, false /* inverted */)
+	require.NoError(t, err)
+	require.False(t, entry.Lease.Empty())
+	require.Equal(t, roachpb.StoreID(1), entry.Lease.Replica.StoreID)
 	require.Equal(t, []roachpb.ReplicaDescriptor{
 		{NodeID: 1, StoreID: 1, ReplicaID: 1},
 		{NodeID: 2, StoreID: 2, ReplicaID: 2},
 		{NodeID: 3, StoreID: 3, ReplicaID: 3},
-	}, entry.Desc().Replicas().Descriptors())
+	}, entry.Desc.Replicas().Descriptors())
 
 	tenantSQL.Exec(t, historicalQuery)
 	rec := <-recCh

pkg/ccl/multiregionccl/datadriven_test.go

Lines changed: 4 additions & 4 deletions
@@ -319,11 +319,11 @@ SET CLUSTER SETTING kv.allocator.min_lease_transfer_interval = '5m'
 		}
 		cache := ds.tc.Server(idx).DistSenderI().(*kvcoord.DistSender).RangeDescriptorCache()
 		tablePrefix := keys.MustAddr(keys.SystemSQLCodec.TablePrefix(tableID))
-		entry := cache.GetCached(ctx, tablePrefix, false /* inverted */)
-		if entry == nil {
-			return errors.Newf("no entry found for %s in cache", tbName).Error()
+		entry, err := cache.TestingGetCached(ctx, tablePrefix, false /* inverted */)
+		if err != nil {
+			return err.Error()
 		}
-		return entry.ClosedTimestampPolicy().String()
+		return entry.ClosedTimestampPolicy.String()
 
 	case "wait-for-zone-config-changes":
 		lookupKey, err := getRangeKeyForInput(t, d, ds.tc)

pkg/ccl/multiregionccl/roundtrips_test.go

Lines changed: 5 additions & 5 deletions
@@ -123,15 +123,15 @@ func TestEnsureLocalReadsOnGlobalTables(t *testing.T) {
 
 		// Check that the cache was indeed populated.
 		cache := tc.Server(i).DistSenderI().(*kvcoord.DistSender).RangeDescriptorCache()
-		entry := cache.GetCached(context.Background(), tablePrefix, false /* inverted */)
-		require.NotNil(t, entry.Lease().Empty())
-		require.NotNil(t, entry)
+		entry, err := cache.TestingGetCached(context.Background(), tablePrefix, false /* inverted */)
+		require.NoError(t, err)
+		require.NotNil(t, entry.Lease.Empty())
 
-		if expected, got := roachpb.LEAD_FOR_GLOBAL_READS, entry.ClosedTimestampPolicy(); got != expected {
+		if expected, got := roachpb.LEAD_FOR_GLOBAL_READS, entry.ClosedTimestampPolicy; got != expected {
 			return errors.Newf("expected closedts policy %s, got %s", expected, got)
 		}
 
-		isLeaseHolder = entry.Lease().Replica.NodeID == tc.Server(i).NodeID()
+		isLeaseHolder = entry.Lease.Replica.NodeID == tc.Server(i).NodeID()
 		return nil
 	})

pkg/kv/bulk/sst_batcher.go

Lines changed: 1 addition & 1 deletion
@@ -449,7 +449,7 @@ func (b *SSTBatcher) flushIfNeeded(ctx context.Context, nextKey roachpb.Key) err
 	if r, err := b.rc.Lookup(ctx, k); err != nil {
 		log.Warningf(ctx, "failed to lookup range cache entry for key %v: %v", k, err)
 	} else {
-		k := r.Desc().EndKey.AsRawKey()
+		k := r.Desc.EndKey.AsRawKey()
 		b.flushKey = k
 		log.VEventf(ctx, 3, "%s building sstable that will flush before %v", b.name, k)
 	}

pkg/kv/bulk/sst_batcher_test.go

Lines changed: 1 addition & 1 deletion
@@ -294,7 +294,7 @@ func runTestImport(t *testing.T, batchSizeValue int64) {
 	for _, k := range []int{0, split1} {
 		ent, err := ds.RangeDescriptorCache().Lookup(ctx, keys.MustAddr(key(k)))
 		require.NoError(t, err)
-		mockCache.Insert(ctx, roachpb.RangeInfo{Desc: *ent.Desc()})
+		mockCache.Insert(ctx, ent)
 	}
 
 	t.Logf("splitting at %s", key(split2))

pkg/kv/kvclient/kvcoord/dist_sender_test.go

Lines changed: 45 additions & 51 deletions
@@ -850,16 +850,14 @@ func TestRetryOnNotLeaseHolderError(t *testing.T) {
 			t.Fatalf("unexpected error: %v", pErr)
 		}
 		require.Equal(t, 2, attempts)
-		rng := ds.rangeCache.GetCached(ctx, testUserRangeDescriptor.StartKey, false /* inverted */)
-		require.NotNil(t, rng)
+		rng, err := ds.rangeCache.TestingGetCached(ctx, testUserRangeDescriptor.StartKey, false /* inverted */)
+		require.NoError(t, err)
 
 		if tc.expLeaseholder != nil {
-			lh := rng.Leaseholder()
-			require.NotNil(t, lh)
-			require.Equal(t, tc.expLeaseholder, lh)
+			lh := rng.Lease.Replica
+			require.Equal(t, *tc.expLeaseholder, lh)
 			if tc.expLease {
-				l := rng.Lease()
-				require.NotNil(t, l)
+				l := rng.Lease
 				require.Equal(t, *tc.expLeaseholder, l.Replica)
 				// The transport retry will use the replica descriptor from the
 				// initial range descriptor, not the one returned in the NLHE, i.e.
@@ -868,7 +866,7 @@
 				expRetryReplica.Type = 0
 				require.Equal(t, expRetryReplica, retryReplica)
 			} else {
-				require.Nil(t, rng.Lease())
+				require.True(t, rng.Lease.Empty())
 			}
 		}
 	})
@@ -1529,10 +1527,11 @@ func TestDistSenderLeaseholderDown(t *testing.T) {
 		t.Errorf("contacted n1: %t, contacted n2: %t", contacted1, contacted2)
 	}
 
-	rng := ds.rangeCache.GetCached(ctx, testUserRangeDescriptor.StartKey, false /* inverted */)
-	require.Equal(t, desc, *rng.Desc())
-	require.Equal(t, roachpb.StoreID(2), rng.Lease().Replica.StoreID)
-	require.Equal(t, roachpb.LAG_BY_CLUSTER_SETTING, rng.ClosedTimestampPolicy())
+	rng, err := ds.rangeCache.TestingGetCached(ctx, testUserRangeDescriptor.StartKey, false /* inverted */)
+	require.NoError(t, err)
+	require.Equal(t, desc, rng.Desc)
+	require.Equal(t, roachpb.StoreID(2), rng.Lease.Replica.StoreID)
+	require.Equal(t, roachpb.LAG_BY_CLUSTER_SETTING, rng.ClosedTimestampPolicy)
 }
 
 // TestRetryOnDescriptorLookupError verifies that the DistSender retries a descriptor
@@ -1793,11 +1792,11 @@ func TestEvictCacheOnError(t *testing.T) {
 		if _, pErr := kv.SendWrapped(ctx, ds, put); pErr != nil && !testutils.IsPError(pErr, errString) && !testutils.IsError(pErr.GoError(), ctx.Err().Error()) {
 			t.Errorf("put encountered unexpected error: %s", pErr)
 		}
-		rng := ds.rangeCache.GetCached(ctx, testUserRangeDescriptor.StartKey, false /* inverted */)
+		rng, err := ds.rangeCache.TestingGetCached(ctx, testUserRangeDescriptor.StartKey, false /* inverted */)
 		if tc.shouldClearReplica {
-			require.Nil(t, rng)
+			require.Error(t, err)
 		} else if tc.shouldClearLeaseHolder {
-			require.True(t, rng.Lease().Empty())
+			require.True(t, rng.Lease.Empty())
 		}
 	})
 }
@@ -2301,16 +2300,8 @@ func TestDistSenderDescriptorUpdatesOnSuccessfulRPCs(t *testing.T) {
 		// Check that the cache has the updated descriptor returned by the RPC.
 		for _, ri := range tc {
 			rk := ri.Desc.StartKey
-			entry := ds.rangeCache.GetCached(ctx, rk, false /* inverted */)
-			require.NotNil(t, entry)
-			require.Equal(t, &ri.Desc, entry.Desc())
-			if ri.Lease.Empty() {
-				require.Nil(t, entry.Leaseholder())
-				require.Nil(t, entry.Lease())
-			} else {
-				require.Equal(t, &ri.Lease, entry.Lease())
-			}
-			require.Equal(t, ri.ClosedTimestampPolicy, entry.ClosedTimestampPolicy())
+			entry, _ := ds.rangeCache.TestingGetCached(ctx, rk, false /* inverted */)
+			require.Equal(t, ri, entry)
 		}
 	})
 }
@@ -2374,9 +2365,10 @@ func TestSendRPCRangeNotFoundError(t *testing.T) {
 		if len(seen) == 1 {
 			// Pretend that this replica is the leaseholder in the cache to verify
 			// that the response evicts it.
-			rng := ds.rangeCache.GetCached(ctx, descriptor.StartKey, false /* inverse */)
+			rng, err := ds.rangeCache.TestingGetCached(ctx, descriptor.StartKey, false /* inverse */)
+			require.NoError(t, err)
 			ds.rangeCache.Insert(ctx, roachpb.RangeInfo{
-				Desc: *rng.Desc(),
+				Desc:  rng.Desc,
 				Lease: roachpb.Lease{Replica: ba.Replica},
 			})
 		}
@@ -2409,9 +2401,9 @@
 		t.Fatal(err)
 	}
 
-	rng := ds.rangeCache.GetCached(ctx, descriptor.StartKey, false /* inverted */)
-	require.NotNil(t, rng)
-	require.Equal(t, leaseholderStoreID, rng.Lease().Replica.StoreID)
+	rng, e := ds.rangeCache.TestingGetCached(ctx, descriptor.StartKey, false /* inverted */)
+	require.NoError(t, e)
+	require.Equal(t, leaseholderStoreID, rng.Lease.Replica.StoreID)
 }
 
 func TestMultiRangeGapReverse(t *testing.T) {
@@ -4084,10 +4076,10 @@ func TestCanSendToFollower(t *testing.T) {
 			// we've had where we were always updating the leaseholder on successful
 			// RPCs because we erroneously assumed that a success must come from the
 			// leaseholder.
-			rng := ds.rangeCache.GetCached(ctx, testUserRangeDescriptor.StartKey, false /* inverted */)
-			require.NotNil(t, rng)
-			require.NotNil(t, rng.Lease())
-			require.Equal(t, roachpb.StoreID(2), rng.Lease().Replica.StoreID)
+			rng, err := ds.rangeCache.TestingGetCached(ctx, testUserRangeDescriptor.StartKey, false /* inverted */)
+			require.NoError(t, err)
+			require.NotNil(t, rng.Lease)
+			require.Equal(t, roachpb.StoreID(2), rng.Lease.Replica.StoreID)
 		})
 	}
 }
@@ -4250,10 +4242,11 @@ func TestEvictMetaRange(t *testing.T) {
 		}
 
 		// Verify that there is one meta2 cached range.
-		cachedRange := ds.rangeCache.GetCached(ctx, keys.RangeMetaKey(roachpb.RKey("a")), false)
-		if !cachedRange.Desc().StartKey.Equal(keys.Meta2Prefix) || !cachedRange.Desc().EndKey.Equal(testMetaEndKey) {
+		cachedRange, err := ds.rangeCache.TestingGetCached(ctx, keys.RangeMetaKey(roachpb.RKey("a")), false)
+		require.NoError(t, err)
+		if !cachedRange.Desc.StartKey.Equal(keys.Meta2Prefix) || !cachedRange.Desc.EndKey.Equal(testMetaEndKey) {
 			t.Fatalf("expected cached meta2 range to be [%s, %s), actual [%s, %s)",
-				keys.Meta2Prefix, testMetaEndKey, cachedRange.Desc().StartKey, cachedRange.Desc().EndKey)
+				keys.Meta2Prefix, testMetaEndKey, cachedRange.Desc.StartKey, cachedRange.Desc.EndKey)
 		}
 
 		// Simulate a split on the meta2 range and mark it as stale.
@@ -4265,15 +4258,17 @@
 		}
 
 		// Verify that there are two meta2 cached ranges.
-		cachedRange = ds.rangeCache.GetCached(ctx, keys.RangeMetaKey(roachpb.RKey("a")), false)
-		if !cachedRange.Desc().StartKey.Equal(keys.Meta2Prefix) || !cachedRange.Desc().EndKey.Equal(splitKey) {
+		cachedRange, err = ds.rangeCache.TestingGetCached(ctx, keys.RangeMetaKey(roachpb.RKey("a")), false)
+		require.NoError(t, err)
+		if !cachedRange.Desc.StartKey.Equal(keys.Meta2Prefix) || !cachedRange.Desc.EndKey.Equal(splitKey) {
 			t.Fatalf("expected cached meta2 range to be [%s, %s), actual [%s, %s)",
-				keys.Meta2Prefix, splitKey, cachedRange.Desc().StartKey, cachedRange.Desc().EndKey)
+				keys.Meta2Prefix, splitKey, cachedRange.Desc.StartKey, cachedRange.Desc.EndKey)
 		}
-		cachedRange = ds.rangeCache.GetCached(ctx, keys.RangeMetaKey(roachpb.RKey("b")), false)
-		if !cachedRange.Desc().StartKey.Equal(splitKey) || !cachedRange.Desc().EndKey.Equal(testMetaEndKey) {
+		cachedRange, err = ds.rangeCache.TestingGetCached(ctx, keys.RangeMetaKey(roachpb.RKey("b")), false)
+		require.NoError(t, err)
+		if !cachedRange.Desc.StartKey.Equal(splitKey) || !cachedRange.Desc.EndKey.Equal(testMetaEndKey) {
 			t.Fatalf("expected cached meta2 range to be [%s, %s), actual [%s, %s)",
-				splitKey, testMetaEndKey, cachedRange.Desc().StartKey, cachedRange.Desc().EndKey)
+				splitKey, testMetaEndKey, cachedRange.Desc.StartKey, cachedRange.Desc.EndKey)
 		}
 	})
 }
@@ -5291,9 +5286,8 @@ func TestSendToReplicasSkipsStaleReplicas(t *testing.T) {
 					},
 				},
 			})
-			ent, err := rc.Lookup(ctx, roachpb.RKeyMin)
+			tok, err := rc.LookupWithEvictionToken(ctx, roachpb.RKeyMin, rangecache.EvictionToken{}, false)
 			require.NoError(t, err)
-			tok := rc.MakeEvictionToken(&ent)
 
 			numCalled := 0
 			transportFn := func(_ context.Context, ba *kvpb.BatchRequest) (*kvpb.BatchResponse, error) {
@@ -5349,16 +5343,16 @@ func TestSendToReplicasSkipsStaleReplicas(t *testing.T) {
 			_, err = ds.sendToReplicas(ctx, ba, tok, false /* withCommit */)
 			require.IsType(t, &sendError{}, err)
 			require.Regexp(t, "NotLeaseHolderError", err)
-			cached := rc.GetCached(ctx, tc.initialDesc.StartKey, false /* inverted */)
-			require.NotNil(t, cached)
-			require.Equal(t, tc.updatedDesc, *cached.Desc())
+			cached, err := rc.TestingGetCached(ctx, tc.initialDesc.StartKey, false /* inverted */)
+			require.NoError(t, err)
+			require.Equal(t, tc.updatedDesc, cached.Desc)
 			require.Equal(t, tc.expReplicasTried, numCalled)
 			if tc.expLeaseholder == 0 {
 				// Check that the leaseholder is cleared out.
-				require.Nil(t, cached.Leaseholder())
+				require.True(t, cached.Lease.Empty())
 			} else {
-				require.NotNil(t, cached.Leaseholder())
-				require.Equal(t, tc.expLeaseholder, cached.Leaseholder().ReplicaID)
+				require.NotNil(t, cached.Lease)
+				require.Equal(t, tc.expLeaseholder, cached.Lease.Replica.ReplicaID)
 			}
 		})
 	})