Commit 4be2e61
merge 7.0 -> 7.1
2 parents: eb6ebd9 + 224158c

File tree: 10 files changed, +1261 −281 lines
Lines changed: 5 additions & 0 deletions (new file)

@@ -0,0 +1,5 @@
+!include suite/ndb/my.cnf
+
+[cluster_config.1]
+DataMemory=300M
+IndexMemory=700M
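This per-test cnf includes the suite's default my.cnf and then overrides DataMemory and IndexMemory for the [cluster_config.1] section, presumably so the data nodes can hold the several million rows that the accompanying test loads.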
Lines changed: 21 additions & 0 deletions (new file)

@@ -0,0 +1,21 @@
+set max_heap_table_size = 286720000;
+create table t1 (a int key) engine=memory;
+load data local infile 'suite/ndb/data/table_data10000.dat' into table t1 columns terminated by ' ' (a, @col2);
+insert into t1 select a + 10000 from t1;;
+insert into t1 select a + 10000 * 2 from t1;;
+insert into t1 select a + 10000 * 2 * 2 from t1;;
+insert into t1 select a + 10000 * 2 * 2 * 2 from t1;;
+insert into t1 select a + 10000 * 2 * 2 * 2 * 2 from t1;;
+insert into t1 select a + 10000 * 2 * 2 * 2 * 2 * 2 from t1;;
+insert into t1 select a + 10000 * 2 * 2 * 2 * 2 * 2 * 2 from t1;;
+insert into t1 select a + 10000 * 2 * 2 * 2 * 2 * 2 * 2 * 2 from t1;;
+insert into t1 select a + 10000 * 2 * 2 * 2 * 2 * 2 * 2 * 2 * 2 from t1;;
+select count(*) from t1;
+count(*)
+5120000
+alter table t1 engine=ndbcluster comment='NDB_TABLE=NOLOGGING' partition by key() partitions 1;
+alter table t1 engine=memory;
+select count(*) from t1;
+count(*)
+5120000
+drop table t1;
Lines changed: 25 additions & 0 deletions (new file)

@@ -0,0 +1,25 @@
+-- source include/have_ndb.inc
+
+# Test is using error insert, check that binaries support it
+-- source suite/ndb/t/have_ndb_error_insert.inc
+
+# Use small LoadFactors to force sparse hash table
+--exec $NDB_MGM --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING" -e "all error 3003" >> $NDB_TOOLS_OUTPUT
+
+set max_heap_table_size = 286720000;
+create table t1 (a int key) engine=memory;
+load data local infile 'suite/ndb/data/table_data10000.dat' into table t1 columns terminated by ' ' (a, @col2);
+let $i = 9;
+let $b = 10000;
+while ($i)
+{
+  --eval insert into t1 select a + $b from t1;
+  let $b = $b * 2;
+  dec $i;
+}
+select count(*) from t1;
+alter table t1 engine=ndbcluster comment='NDB_TABLE=NOLOGGING' partition by key() partitions 1;
+--exec $NDB_MGM --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING" -e "all report memory" >> $NDB_TOOLS_OUTPUT
+alter table t1 engine=memory;
+select count(*) from t1;
+drop table t1;
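Each pass of the while loop doubles t1, so nine passes turn the 10000 loaded rows into 10000 × 2^9 = 5120000 rows, the count recorded in the expected-result file above.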

storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp

Lines changed: 64 additions & 31 deletions
@@ -26,6 +26,7 @@
 #include <pc.hpp>
 #include <DynArr256.hpp>
 #include <SimulatedBlock.hpp>
+#include <LHLevel.hpp>
 
 #ifdef DBACC_C
 // Debug Macros
@@ -199,7 +200,7 @@ class ElementHeader {
  *
  * l = Locked -- If true contains operation else scan bits + hash value
  * s = Scan bits
- * h = Hash value
+ * h = Reduced hash value. The lower bits used for address is shifted away
  * o = Operation ptr I
  *
  * 1111111111222222222233
@@ -208,17 +209,16 @@ class ElementHeader {
  * ooooooooooooooooooooooooooooooo
  */
 public:
-  STATIC_CONST( HASH_VALUE_PART_MASK = 0xFFFF );
-
   static bool getLocked(Uint32 data);
   static bool getUnlocked(Uint32 data);
   static Uint32 getScanBits(Uint32 data);
-  static Uint32 getHashValuePart(Uint32 data);
   static Uint32 getOpPtrI(Uint32 data);
+  static LHBits16 getReducedHashValue(Uint32 data);
 
   static Uint32 setLocked(Uint32 opPtrI);
-  static Uint32 setUnlocked(Uint32 hashValuePart, Uint32 scanBits);
+  static Uint32 setUnlocked(Uint32 scanBits, LHBits16 const& reducedHashValue);
   static Uint32 setScanBit(Uint32 header, Uint32 scanBit);
+  static Uint32 setReducedHashValue(Uint32 header, LHBits16 const& reducedHashValue);
   static Uint32 clearScanBit(Uint32 header, Uint32 scanBit);
 };
 
@@ -241,11 +241,11 @@ ElementHeader::getScanBits(Uint32 data){
   return (data >> 1) & ((1 << MAX_PARALLEL_SCANS_PER_FRAG) - 1);
 }
 
-inline
-Uint32
-ElementHeader::getHashValuePart(Uint32 data){
+inline
+LHBits16
+ElementHeader::getReducedHashValue(Uint32 data){
   assert(getUnlocked(data));
-  return data >> 16;
+  return LHBits16::unpack(data >> 16);
 }
 
 inline
@@ -258,12 +258,15 @@ ElementHeader::getOpPtrI(Uint32 data){
 inline
 Uint32
 ElementHeader::setLocked(Uint32 opPtrI){
+  assert(opPtrI < 0x8000000);
   return (opPtrI << 1) + 0;
 }
 inline
 Uint32
-ElementHeader::setUnlocked(Uint32 hashValue, Uint32 scanBits){
-  return (hashValue << 16) + (scanBits << 1) + 1;
+ElementHeader::setUnlocked(Uint32 scanBits, LHBits16 const& reducedHashValue)
+{
+  assert(scanBits < (1 << MAX_PARALLEL_SCANS_PER_FRAG));
+  return (Uint32(reducedHashValue.pack()) << 16) | (scanBits << 1) | 1;
 }
 
 inline
@@ -280,6 +283,13 @@ ElementHeader::clearScanBit(Uint32 header, Uint32 scanBit){
   return header & (~(scanBit << 1));
 }
 
+inline
+Uint32
+ElementHeader::setReducedHashValue(Uint32 header, LHBits16 const& reducedHashValue)
+{
+  assert(getUnlocked(header));
+  return (Uint32(reducedHashValue.pack()) << 16) | (header & 0xffff);
+}
 
 class Dbacc: public SimulatedBlock {
 friend class DbaccProxy;
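For orientation, a minimal standalone sketch of the unlocked-header layout these functions pack and unpack. The helper names (pack_unlocked, scan_bits, reduced_hash) and the MAX_PARALLEL_SCANS_PER_FRAG value of 12 are placeholders for this sketch rather than the Dbacc definitions, and LHBits16 is stood in for by a plain 16-bit value:

    #include <cassert>
    #include <cstdint>

    // Unlocked header, per the layout above: bit 0 = 1 (unlocked),
    // the next bits carry the scan bits, bits 16..31 the reduced hash value.
    static const unsigned MAX_PARALLEL_SCANS_PER_FRAG = 12;  // placeholder value

    uint32_t pack_unlocked(uint32_t scanBits, uint16_t reducedHash)
    {
      assert(scanBits < (1u << MAX_PARALLEL_SCANS_PER_FRAG));
      return (uint32_t(reducedHash) << 16) | (scanBits << 1) | 1u;
    }

    bool     is_unlocked(uint32_t header)  { return (header & 1u) != 0; }
    uint32_t scan_bits(uint32_t header)    { return (header >> 1) & ((1u << MAX_PARALLEL_SCANS_PER_FRAG) - 1); }
    uint16_t reduced_hash(uint32_t header) { return uint16_t(header >> 16); }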
@@ -401,14 +411,15 @@ struct Fragmentrec {
 // slackCheck When slack goes over this value it is time to expand.
 // slackCheck = (maxp + p + 1)*(maxloadfactor - minloadfactor) or
 // bucketSize * hysteresis
+// Since at most RNIL 8KiB-pages can be used for a fragment, the extreme values
+// for slack will be within -2^43 and +2^43 words.
 //-----------------------------------------------------------------------------
+  LHLevelRH level;
   Uint32 localkeylen;
-  Uint32 maxp;
   Uint32 maxloadfactor;
   Uint32 minloadfactor;
-  Uint32 p;
-  Uint32 slack;
-  Uint32 slackCheck;
+  Int64 slack;
+  Int64 slackCheck;
 
 //-----------------------------------------------------------------------------
 // nextfreefrag is the next free fragment if linked into a free list
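A rough check of the ±2^43 bound quoted in that comment, assuming NDB's usual 4-byte words: an 8 KiB page holds 8192 / 4 = 2^11 words, and with at most RNIL (on the order of 2^32) pages per fragment the extreme slack is about 2^32 × 2^11 = 2^43 words, beyond what the old Uint32 slack and slackCheck fields could represent, which is presumably why they become Int64 here.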
@@ -441,19 +452,17 @@ struct Fragmentrec {
   Uint16 keyLength;
 
 //-----------------------------------------------------------------------------
-// This flag is used to avoid sending a big number of expand or shrink signals
-// when simultaneously committing many inserts or deletes.
+// Only allow one expand or shrink signal in queue at the time.
 //-----------------------------------------------------------------------------
-  Uint8 expandFlag;
+  bool expandOrShrinkQueued;
 
 //-----------------------------------------------------------------------------
 // hashcheckbit is the bit to check whether to send element to split bucket or not
 // k (== 6) is the number of buckets per page
-// lhfragbits is the number of bits used to calculate the fragment id
 //-----------------------------------------------------------------------------
-  Uint8 hashcheckbit;
-  Uint8 k;
-  Uint8 lhfragbits;
+  STATIC_CONST( k = 6 );
+  STATIC_CONST( MIN_HASH_COMPARE_BITS = 7 );
+  STATIC_CONST( MAX_HASH_VALUE_BITS = 31 );
 
 //-----------------------------------------------------------------------------
 // nodetype can only be STORED in this release. Is currently only set, never read
@@ -469,6 +478,11 @@ struct Fragmentrec {
 // flag to mark that execEXPANDCHECK2 has failed due to DirRange full
 //-----------------------------------------------------------------------------
   Uint8 dirRangeFull;
+
+public:
+  Uint32 getPageNumber(Uint32 bucket_number) const;
+  Uint32 getPageIndex(Uint32 bucket_number) const;
+  bool enough_valid_bits(LHBits16 const& reduced_hash_value) const;
 };
 
 typedef Ptr<Fragmentrec> FragmentrecPtr;
@@ -484,8 +498,7 @@ struct Operationrec {
   Uint32 elementPointer;
   Uint32 fid;
   Uint32 fragptr;
-  Uint32 hashvaluePart;
-  Uint32 hashValue;
+  LHBits32 hashValue;
   Uint32 nextLockOwnerOp;
   Uint32 nextOp;
   Uint32 nextParallelQue;
@@ -511,7 +524,8 @@ struct Operationrec {
   Uint16 tupkeylen;
   Uint32 xfrmtupkeylen;
   Uint32 userblockref;
-  Uint32 scanBits;
+  Uint16 scanBits;
+  LHBits16 reducedHashValue;
 
   enum OpBits {
     OP_MASK = 0x0000F // 4 bits for operation type
@@ -692,8 +706,8 @@ struct Tabrec {
   void releaseDirIndexResources(Signal* signal, FragmentrecPtr regFragPtr);
   void releaseFragRecord(Signal* signal, FragmentrecPtr regFragPtr);
   void initScanFragmentPart(Signal* signal);
-  Uint32 checkScanExpand(Signal* signal);
-  Uint32 checkScanShrink(Signal* signal);
+  Uint32 checkScanExpand(Signal* signal, Uint32 splitBucket);
+  Uint32 checkScanShrink(Signal* signal, Uint32 sourceBucket, Uint32 destBucket);
   void initialiseFragRec(Signal* signal);
   void initialiseFsConnectionRec(Signal* signal);
   void initialiseFsOpRec(Signal* signal);
@@ -761,6 +775,10 @@ struct Tabrec {
   void seizeRightlist(Signal* signal);
   Uint32 readTablePk(Uint32 lkey1, Uint32 lkey2, Uint32 eh, OperationrecPtr);
   Uint32 getElement(Signal* signal, OperationrecPtr& lockOwner);
+  LHBits32 getElementHash(OperationrecPtr& oprec);
+  LHBits32 getElementHash(Uint32 const* element, Int32 forward);
+  LHBits32 getElementHash(Uint32 const* element, Int32 forward, OperationrecPtr& oprec);
+  void shrink_adjust_reduced_hash_value(Uint32 bucket_number);
   Uint32 getPagePtr(DynArr256::Head&, Uint32);
   bool setPagePtr(DynArr256::Head& directory, Uint32 index, Uint32 ptri);
   Uint32 unsetPagePtr(DynArr256::Head& directory, Uint32 index);
@@ -837,8 +855,6 @@ struct Tabrec {
 
   void zpagesize_error(const char* where);
 
-  void reenable_expand_after_redo_log_exection_complete(Signal*);
-
   // charsets
   void xfrmKeyData(Signal* signal);
 
@@ -1000,7 +1016,6 @@ struct Tabrec {
   Uint32 tgeContainerptr;
   Uint32 tgeElementptr;
   Uint32 tgeForward;
-  Uint32 texpReceivedBucket;
   Uint32 texpDirInd;
   Uint32 texpDirRangeIndex;
   Uint32 texpDirPageIndex;
@@ -1034,7 +1049,6 @@ struct Tabrec {
   Uint32 tmp;
   Uint32 tmpP;
   Uint32 tmpP2;
-  Uint32 tmp1;
   Uint32 tmp2;
   Uint32 tgflPageindex;
   Uint32 tmpindex;
@@ -1094,4 +1108,23 @@ struct Tabrec {
   Uint32 c_memusage_report_frequency;
 };
 
+inline Uint32 Dbacc::Fragmentrec::getPageNumber(Uint32 bucket_number) const
+{
+  assert(bucket_number < RNIL);
+  return bucket_number >> k;
+}
+
+inline Uint32 Dbacc::Fragmentrec::getPageIndex(Uint32 bucket_number) const
+{
+  assert(bucket_number < RNIL);
+  return bucket_number & ((1 << k) - 1);
+}
+
+inline bool Dbacc::Fragmentrec::enough_valid_bits(LHBits16 const& reduced_hash_value) const
+{
+  // Forte C 5.0 needs use of intermediate constant
+  int const bits = MIN_HASH_COMPARE_BITS;
+  return level.getNeededValidBits(bits) <= reduced_hash_value.valid_bits();
+}
+
 #endif
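As a small illustration of the bucket addressing these helpers implement, using plain integers rather than the Dbacc types (page_number, page_index and K are names invented for this sketch): with k = 6 a page holds 2^6 = 64 buckets, so a bucket number splits into a page number in the upper bits and a slot within that page in the lower six bits.

    #include <cstdint>

    static const uint32_t K = 6;  // mirrors Fragmentrec::k above

    uint32_t page_number(uint32_t bucket) { return bucket >> K; }              // which page holds the bucket
    uint32_t page_index(uint32_t bucket)  { return bucket & ((1u << K) - 1); } // bucket's slot within that page

    // Example: bucket 130 maps to page 2 (130 >> 6) at slot 2 (130 & 63).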
