
Commit 3deb8fc

merge 7.1 -> 7.2
2 parents 3bb3020 + 4be2e61

File tree

10 files changed: +1258 -283 lines
Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+!include suite/ndb/my.cnf
+
+[cluster_config.1]
+DataMemory=300M
+IndexMemory=700M
Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
+set max_heap_table_size = 286720000;
+create table t1 (a int key) engine=memory;
+load data local infile 'suite/ndb/data/table_data10000.dat' into table t1 columns terminated by ' ' (a, @col2);
+insert into t1 select a + 10000 from t1;;
+insert into t1 select a + 10000 * 2 from t1;;
+insert into t1 select a + 10000 * 2 * 2 from t1;;
+insert into t1 select a + 10000 * 2 * 2 * 2 from t1;;
+insert into t1 select a + 10000 * 2 * 2 * 2 * 2 from t1;;
+insert into t1 select a + 10000 * 2 * 2 * 2 * 2 * 2 from t1;;
+insert into t1 select a + 10000 * 2 * 2 * 2 * 2 * 2 * 2 from t1;;
+insert into t1 select a + 10000 * 2 * 2 * 2 * 2 * 2 * 2 * 2 from t1;;
+insert into t1 select a + 10000 * 2 * 2 * 2 * 2 * 2 * 2 * 2 * 2 from t1;;
+select count(*) from t1;
+count(*)
+5120000
+alter table t1 engine=ndbcluster comment='NDB_TABLE=NOLOGGING' partition by key() partitions 1;
+alter table t1 engine=memory;
+select count(*) from t1;
+count(*)
+5120000
+drop table t1;
Lines changed: 25 additions & 0 deletions
@@ -0,0 +1,25 @@
+-- source include/have_ndb.inc
+
+# Test is using error insert, check that binaries support it
+-- source suite/ndb/t/have_ndb_error_insert.inc
+
+# Use small LoadFactors to force sparse hash table
+--exec $NDB_MGM --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING" -e "all error 3003" >> $NDB_TOOLS_OUTPUT
+
+set max_heap_table_size = 286720000;
+create table t1 (a int key) engine=memory;
+load data local infile 'suite/ndb/data/table_data10000.dat' into table t1 columns terminated by ' ' (a, @col2);
+let $i = 9;
+let $b = 10000;
+while ($i)
+{
+  --eval insert into t1 select a + $b from t1;
+  let $b = $b * 2;
+  dec $i;
+}
+select count(*) from t1;
+alter table t1 engine=ndbcluster comment='NDB_TABLE=NOLOGGING' partition by key() partitions 1;
+--exec $NDB_MGM --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING" -e "all report memory" >> $NDB_TOOLS_OUTPUT
+alter table t1 engine=memory;
+select count(*) from t1;
+drop table t1;
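
Side note on the loop above: the MEMORY table starts with the 10000 rows loaded from table_data10000.dat and is doubled nine times before it is moved into NDB, so the 5120000 in the result file is just 10000 * 2^9. A minimal standalone C++ sketch of that arithmetic (the variable names mirror the test script's $i and $b but are otherwise illustrative):

    #include <cassert>
    #include <cstdint>

    int main()
    {
      // Mirrors the mysqltest loop: each pass copies every existing row
      // with a shifted key, doubling the row count, while the offset ($b)
      // also doubles so the new keys stay unique.
      std::uint64_t rows = 10000;    // rows loaded from the .dat file
      std::uint64_t b = 10000;       // offset added to column a
      for (int i = 9; i > 0; --i)    // while ($i) { ... dec $i; }
      {
        rows += rows;
        b *= 2;
      }
      assert(rows == 5120000);       // matches count(*) in the result file
      return 0;
    }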

storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp

Lines changed: 64 additions & 31 deletions
@@ -27,6 +27,7 @@
 #include <pc.hpp>
 #include <DynArr256.hpp>
 #include <SimulatedBlock.hpp>
+#include <LHLevel.hpp>
 
 #ifdef DBACC_C
 // Debug Macros
@@ -200,7 +201,7 @@ class ElementHeader {
  *
  * l = Locked -- If true contains operation else scan bits + hash value
  * s = Scan bits
- * h = Hash value
+ * h = Reduced hash value. The lower bits used for address is shifted away
  * o = Operation ptr I
  *
  * 1111111111222222222233
@@ -209,17 +210,16 @@ class ElementHeader {
  * ooooooooooooooooooooooooooooooo
  */
 public:
-  STATIC_CONST( HASH_VALUE_PART_MASK = 0xFFFF );
-
   static bool getLocked(Uint32 data);
   static bool getUnlocked(Uint32 data);
   static Uint32 getScanBits(Uint32 data);
-  static Uint32 getHashValuePart(Uint32 data);
   static Uint32 getOpPtrI(Uint32 data);
+  static LHBits16 getReducedHashValue(Uint32 data);
 
   static Uint32 setLocked(Uint32 opPtrI);
-  static Uint32 setUnlocked(Uint32 hashValuePart, Uint32 scanBits);
+  static Uint32 setUnlocked(Uint32 scanBits, LHBits16 const& reducedHashValue);
   static Uint32 setScanBit(Uint32 header, Uint32 scanBit);
+  static Uint32 setReducedHashValue(Uint32 header, LHBits16 const& reducedHashValue);
   static Uint32 clearScanBit(Uint32 header, Uint32 scanBit);
 };
 
@@ -242,11 +242,11 @@ ElementHeader::getScanBits(Uint32 data){
   return (data >> 1) & ((1 << MAX_PARALLEL_SCANS_PER_FRAG) - 1);
 }
 
-inline
-Uint32
-ElementHeader::getHashValuePart(Uint32 data){
+inline
+LHBits16
+ElementHeader::getReducedHashValue(Uint32 data){
   assert(getUnlocked(data));
-  return data >> 16;
+  return LHBits16::unpack(data >> 16);
 }
 
 inline
@@ -259,12 +259,15 @@ ElementHeader::getOpPtrI(Uint32 data){
 inline
 Uint32
 ElementHeader::setLocked(Uint32 opPtrI){
+  assert(opPtrI < 0x8000000);
   return (opPtrI << 1) + 0;
 }
 inline
 Uint32
-ElementHeader::setUnlocked(Uint32 hashValue, Uint32 scanBits){
-  return (hashValue << 16) + (scanBits << 1) + 1;
+ElementHeader::setUnlocked(Uint32 scanBits, LHBits16 const& reducedHashValue)
+{
+  assert(scanBits < (1 << MAX_PARALLEL_SCANS_PER_FRAG));
+  return (Uint32(reducedHashValue.pack()) << 16) | (scanBits << 1) | 1;
 }
 
 inline
@@ -281,6 +284,13 @@ ElementHeader::clearScanBit(Uint32 header, Uint32 scanBit){
   return header & (~(scanBit << 1));
 }
 
+inline
+Uint32
+ElementHeader::setReducedHashValue(Uint32 header, LHBits16 const& reducedHashValue)
+{
+  assert(getUnlocked(header));
+  return (Uint32(reducedHashValue.pack()) << 16) | (header & 0xffff);
+}
 
 class Dbacc: public SimulatedBlock {
 friend class DbaccProxy;
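
Note on the ElementHeader change above: an unlocked header now keeps the 16-bit reduced hash value in the upper half of the word, the scan bits just above bit 0, and bit 0 itself as the locked/unlocked flag, while a locked header stores the operation pointer instead. A hedged sketch of that packing with plain integers (LHBits16 is replaced by a raw Uint16 here, so pack()/unpack() and the valid-bits bookkeeping are assumed away):

    #include <cassert>
    #include <cstdint>

    using Uint16 = std::uint16_t;
    using Uint32 = std::uint32_t;

    // Simplified stand-ins for the ElementHeader helpers in the diff.
    // Bit 0 == 1 means "unlocked": scan bits live in bits 1.., the reduced
    // hash value in bits 16..31. Bit 0 == 0 means "locked": the rest of the
    // word holds the operation pointer instead.
    static Uint32 setUnlocked(Uint32 scanBits, Uint16 reducedHash)
    {
      return (Uint32(reducedHash) << 16) | (scanBits << 1) | 1;
    }
    static bool getUnlocked(Uint32 header) { return (header & 1) != 0; }
    static Uint16 getReducedHashValue(Uint32 header) { return Uint16(header >> 16); }
    static Uint32 getScanBits(Uint32 header, Uint32 maxScans)
    {
      return (header >> 1) & ((1u << maxScans) - 1);
    }

    int main()
    {
      Uint32 h = setUnlocked(/*scanBits=*/0x3, /*reducedHash=*/0xABCD);
      assert(getUnlocked(h));
      assert(getReducedHashValue(h) == 0xABCD);
      assert(getScanBits(h, /*maxScans=*/12) == 0x3);
      return 0;
    }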
@@ -402,14 +412,15 @@ struct Fragmentrec {
 // slackCheck  When slack goes over this value it is time to expand.
 //             slackCheck = (maxp + p + 1)*(maxloadfactor - minloadfactor) or
 //             bucketSize * hysteresis
+//             Since at most RNIL 8KiB-pages can be used for a fragment, the extreme values
+//             for slack will be within -2^43 and +2^43 words.
 //-----------------------------------------------------------------------------
+  LHLevelRH level;
   Uint32 localkeylen;
-  Uint32 maxp;
   Uint32 maxloadfactor;
   Uint32 minloadfactor;
-  Uint32 p;
-  Uint32 slack;
-  Uint32 slackCheck;
+  Int64 slack;
+  Int64 slackCheck;
 
 //-----------------------------------------------------------------------------
 // nextfreefrag is the next free fragment if linked into a free list
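
Note on the -2^43..+2^43 comment in the hunk above: an 8 KiB page is 2048 (2^11) 32-bit words, and the comment caps a fragment at roughly RNIL pages, on the order of 2^32, so |slack| stays within about 2^32 * 2^11 = 2^43 words, which no longer fits a Uint32 and motivates the switch to Int64. A small sketch of that bound, with the 2^32 page count treated as an assumption taken from the comment:

    #include <cstdint>

    int main()
    {
      const std::uint64_t max_pages      = std::uint64_t(1) << 32;        // ~RNIL pages (assumed)
      const std::uint64_t words_per_page = 8192 / sizeof(std::uint32_t);  // 8 KiB page = 2^11 words
      const std::uint64_t slack_bound    = max_pages * words_per_page;    // 2^43 words

      // Int64 holds +/-2^63, so +/-2^43 fits with plenty of headroom.
      return slack_bound == (std::uint64_t(1) << 43) ? 0 : 1;
    }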
@@ -442,19 +453,17 @@ struct Fragmentrec {
   Uint16 keyLength;
 
 //-----------------------------------------------------------------------------
-// This flag is used to avoid sending a big number of expand or shrink signals
-// when simultaneously committing many inserts or deletes.
+// Only allow one expand or shrink signal in queue at the time.
 //-----------------------------------------------------------------------------
-  Uint8 expandFlag;
+  bool expandOrShrinkQueued;
 
 //-----------------------------------------------------------------------------
 // hashcheckbit is the bit to check whether to send element to split bucket or not
 // k (== 6) is the number of buckets per page
-// lhfragbits is the number of bits used to calculate the fragment id
 //-----------------------------------------------------------------------------
-  Uint8 hashcheckbit;
-  Uint8 k;
-  Uint8 lhfragbits;
+  STATIC_CONST( k = 6 );
+  STATIC_CONST( MIN_HASH_COMPARE_BITS = 7 );
+  STATIC_CONST( MAX_HASH_VALUE_BITS = 31 );
 
 //-----------------------------------------------------------------------------
 // nodetype can only be STORED in this release. Is currently only set, never read
@@ -470,6 +479,11 @@ struct Fragmentrec {
 // flag to mark that execEXPANDCHECK2 has failed due to DirRange full
 //-----------------------------------------------------------------------------
   Uint8 dirRangeFull;
+
+public:
+  Uint32 getPageNumber(Uint32 bucket_number) const;
+  Uint32 getPageIndex(Uint32 bucket_number) const;
+  bool enough_valid_bits(LHBits16 const& reduced_hash_value) const;
 };
 
 typedef Ptr<Fragmentrec> FragmentrecPtr;
@@ -485,8 +499,7 @@ struct Operationrec {
   Uint32 elementPointer;
   Uint32 fid;
   Uint32 fragptr;
-  Uint32 hashvaluePart;
-  Uint32 hashValue;
+  LHBits32 hashValue;
   Uint32 nextLockOwnerOp;
   Uint32 nextOp;
   Uint32 nextParallelQue;
@@ -512,7 +525,8 @@ struct Operationrec {
   Uint16 tupkeylen;
   Uint32 xfrmtupkeylen;
   Uint32 userblockref;
-  Uint32 scanBits;
+  Uint16 scanBits;
+  LHBits16 reducedHashValue;
 
   enum OpBits {
     OP_MASK = 0x0000F // 4 bits for operation type
@@ -693,8 +707,8 @@ struct Tabrec {
   void releaseDirIndexResources(Signal* signal, FragmentrecPtr regFragPtr);
   void releaseFragRecord(Signal* signal, FragmentrecPtr regFragPtr);
   void initScanFragmentPart(Signal* signal);
-  Uint32 checkScanExpand(Signal* signal);
-  Uint32 checkScanShrink(Signal* signal);
+  Uint32 checkScanExpand(Signal* signal, Uint32 splitBucket);
+  Uint32 checkScanShrink(Signal* signal, Uint32 sourceBucket, Uint32 destBucket);
   void initialiseFragRec(Signal* signal);
   void initialiseFsConnectionRec(Signal* signal);
   void initialiseFsOpRec(Signal* signal);
@@ -762,6 +776,10 @@ struct Tabrec {
   void seizeRightlist(Signal* signal);
   Uint32 readTablePk(Uint32 lkey1, Uint32 lkey2, Uint32 eh, OperationrecPtr);
   Uint32 getElement(Signal* signal, OperationrecPtr& lockOwner);
+  LHBits32 getElementHash(OperationrecPtr& oprec);
+  LHBits32 getElementHash(Uint32 const* element, Int32 forward);
+  LHBits32 getElementHash(Uint32 const* element, Int32 forward, OperationrecPtr& oprec);
+  void shrink_adjust_reduced_hash_value(Uint32 bucket_number);
   Uint32 getPagePtr(DynArr256::Head&, Uint32);
   bool setPagePtr(DynArr256::Head& directory, Uint32 index, Uint32 ptri);
   Uint32 unsetPagePtr(DynArr256::Head& directory, Uint32 index);
@@ -838,8 +856,6 @@ struct Tabrec {
 
   void zpagesize_error(const char* where);
 
-  void reenable_expand_after_redo_log_exection_complete(Signal*);
-
   // charsets
   void xfrmKeyData(Signal* signal);
 
@@ -1001,7 +1017,6 @@ struct Tabrec {
   Uint32 tgeContainerptr;
   Uint32 tgeElementptr;
   Uint32 tgeForward;
-  Uint32 texpReceivedBucket;
   Uint32 texpDirInd;
   Uint32 texpDirRangeIndex;
   Uint32 texpDirPageIndex;
@@ -1035,7 +1050,6 @@ struct Tabrec {
   Uint32 tmp;
   Uint32 tmpP;
   Uint32 tmpP2;
-  Uint32 tmp1;
   Uint32 tmp2;
   Uint32 tgflPageindex;
   Uint32 tmpindex;
@@ -1095,4 +1109,23 @@ struct Tabrec {
   Uint32 c_memusage_report_frequency;
 };
 
+inline Uint32 Dbacc::Fragmentrec::getPageNumber(Uint32 bucket_number) const
+{
+  assert(bucket_number < RNIL);
+  return bucket_number >> k;
+}
+
+inline Uint32 Dbacc::Fragmentrec::getPageIndex(Uint32 bucket_number) const
+{
+  assert(bucket_number < RNIL);
+  return bucket_number & ((1 << k) - 1);
+}
+
+inline bool Dbacc::Fragmentrec::enough_valid_bits(LHBits16 const& reduced_hash_value) const
+{
+  // Forte C 5.0 needs use of intermediate constant
+  int const bits = MIN_HASH_COMPARE_BITS;
+  return level.getNeededValidBits(bits) <= reduced_hash_value.valid_bits();
+}
+
 #endif
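
The new Fragmentrec helpers in the final hunk spell out the bucket addressing: with k = 6 there are 2^6 = 64 buckets per page, so the page number is the bucket number shifted right by k and the in-page index is its low k bits. A standalone sketch of the same arithmetic (the surrounding Fragmentrec, the RNIL asserts and the LHBits16 machinery are left out):

    #include <cassert>
    #include <cstdint>

    using Uint32 = std::uint32_t;

    static const Uint32 k = 6;  // buckets per page = 2^k = 64, as in Fragmentrec

    static Uint32 getPageNumber(Uint32 bucket_number) { return bucket_number >> k; }
    static Uint32 getPageIndex(Uint32 bucket_number)  { return bucket_number & ((1u << k) - 1); }

    int main()
    {
      // Bucket 200 = 3 * 64 + 8 lives at index 8 on page 3.
      assert(getPageNumber(200) == 3);
      assert(getPageIndex(200) == 8);
      return 0;
    }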
