@@ -4575,34 +4575,45 @@ DictionaryTestSuite.test("removeAt") {
 }
 
 DictionaryTestSuite.test("localHashSeeds") {
-  // With global hashing, copying elements in hash order between dictionaries
+  // With global hashing, copying elements in hash order between hash tables
   // can become quadratic. (See https://bugs.swift.org/browse/SR-3268)
   //
   // We defeat this by mixing the local storage capacity into the global hash
   // seed, thereby breaking the correlation between bucket indices across
-  // dictionaries with different sizes.
+  // hash tables with different sizes.
   //
-  // Verify this works by copying the 1% of elements near the beginning of a
-  // large Dictionary into a smaller one. If the elements end up in the same
-  // order in the smaller Dictionary, then that indicates we do not use
-  // size-dependent seeding.
+  // Verify this works by copying a small sampling of elements near the
+  // beginning of a large Dictionary into a smaller one. If the elements end up
+  // in the same order in the smaller Dictionary, then that indicates we do not
+  // use size-dependent seeding.
+
   let count = 100_000
-  var large = [Int: Int](minimumCapacity: count)
+  // Set a large table size to reduce frequency/length of collision chains.
+  var large = [Int: Int](minimumCapacity: 4 * count)
   for i in 1 ..< count {
     large[i] = 2 * i
   }
 
-  // Take the second 1% of elements. The hash table may begin with collided
-  // elements wrapped over from the end -- we need to skip over these, as they
-  // would be sorted into irregular slots in the smaller table.
-  let slice = large.prefix(2 * count / 100).dropFirst(count / 100)
+  let bunch = count / 100 // 1 percent's worth of elements
+
+  // Copy two bunches of elements into another dictionary that's half the size
+  // of the first. We start after the initial bunch because the hash table may
+  // begin with collided elements wrapped over from the end, and these would be
+  // sorted into irregular slots in the smaller table.
+  let slice = large.prefix(3 * bunch).dropFirst(bunch)
   var small = [Int: Int](minimumCapacity: large.capacity / 2)
   expectLT(small.capacity, large.capacity)
   for (key, value) in slice {
     small[key] = value
   }
+
+  // Compare the second halves of the new dictionary and the slice. Ignore the
+  // first halves; the first few elements may not be in the correct order if we
+  // happened to start copying from the middle of a collision chain.
+  let smallKeys = small.dropFirst(bunch).map { $0.key }
+  let sliceKeys = slice.dropFirst(bunch).map { $0.key }
   // If this test fails, there is a problem with local hash seeding.
-  expectFalse(small.map { $0.key }.elementsEqual(slice.map { $0.key }))
+  expectFalse(smallKeys.elementsEqual(sliceKeys))
 }
 
 DictionaryTestSuite.setUp {
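
For readers unfamiliar with the mechanism this test exercises, the sketch below illustrates the idea in isolation: deriving each table's effective seed from both a process-global seed and the table's own bucket count makes the same keys land in uncorrelated buckets in tables of different sizes. This is a standalone illustration, not the standard library's internal hashing code; `globalSeed`, `bucketIndex(of:bucketCount:)`, and the XOR-based seed mixing are assumptions made for the example.

```swift
// A minimal, self-contained sketch of size-dependent seeding (not the
// standard library's internals). `globalSeed`, `bucketIndex(of:bucketCount:)`,
// and the XOR-based seed mixing are assumptions made for illustration.
let globalSeed: UInt64 = 0x9E37_79B9_7F4A_7C15

func bucketIndex(of key: Int, bucketCount: Int) -> Int {
  // Mix the table's bucket count into the seed so that tables of different
  // sizes hash the same key into uncorrelated buckets.
  var hasher = Hasher()
  hasher.combine(globalSeed ^ UInt64(bucketCount))
  hasher.combine(key)
  // bucketCount is assumed to be a power of two, so masking selects a bucket.
  return hasher.finalize() & (bucketCount - 1)
}

// With a single shared seed, a smaller power-of-two table would reuse the
// larger table's bucket index with the top bit masked off, keeping indices
// correlated across sizes; per-size seeding breaks that correlation.
let keys = Array(1...10)
let inLarge = keys.map { bucketIndex(of: $0, bucketCount: 1 << 17) }
let inSmall = keys.map { bucketIndex(of: $0, bucketCount: 1 << 16) }
print(inLarge)
print(inSmall)
```

Because Swift's `Hasher` is itself randomly seeded per process, the printed bucket indices vary between runs; the point is only that the two sequences typically do not share the same relative order.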