@@ -407,7 +407,7 @@
 MM_Scavenger::workerSetupForGC(MM_EnvironmentStandard *env)
 {
 	/* Clear local stats */
-	memset((void *)&(env->_scavengerStats), 0, sizeof(MM_ScavengerStats));
+	env->_scavengerStats.clear();
 
 	/* Clear the worker hot field statistics */
 	clearHotFieldStats(env);
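For context, the memset-to-clear() switch above moves the reset into the stats type itself. A minimal sketch of what such a helper could look like, assuming MM_ScavengerStats is a plain aggregate of counters (the fields below are illustrative, not the real layout):

struct MM_ScavengerStats {
	uint64_t _startTime;   /* hypothetical members, for illustration only */
	uint64_t _endTime;
	uintptr_t _flipBytes;

	/* value-initialize every member; keeping the reset next to the type
	 * avoids caller-side memsets that must guess the correct size */
	void clear() { *this = MM_ScavengerStats(); }
};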
@@ -469,7 +469,7 @@ MM_Scavenger::reportScavengeStart(MM_EnvironmentStandard *env)
 }
 
 void
-MM_Scavenger::reportScavengeEnd(MM_EnvironmentStandard *env)
+MM_Scavenger::reportScavengeEnd(MM_EnvironmentStandard *env, bool lastIncrement)
 {
 	OMRPORT_ACCESS_FROM_OMRPORT(env->getPortLibrary());
 
@@ -485,7 +485,8 @@ MM_Scavenger::reportScavengeEnd(MM_EnvironmentStandard *env)
 		env->getOmrVMThread(),
 		omrtime_hires_clock(),
 		J9HOOK_MM_PRIVATE_SCAVENGE_END,
-		env->_cycleState->_activeSubSpace
+		env->_cycleState->_activeSubSpace,
+		lastIncrement
 	);
 }
 
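The new lastIncrement argument is forwarded into the SCAVENGE_END hook trigger above, so consumers can tell intermediate concurrent increments from the final one. A hedged sketch of a listener, assuming the usual J9Hook callback shape and that the generated MM_ScavengeEndEvent struct gains a matching lastIncrement field (neither is shown in this diff):

static void
scavengeEndListener(J9HookInterface **hook, uintptr_t eventNum, void *eventData, void *userData)
{
	MM_ScavengeEndEvent *event = (MM_ScavengeEndEvent *)eventData; /* assumed event struct */
	if (event->lastIncrement) {
		/* e.g. emit the per-cycle verbose GC summary only once per cycle */
	}
}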
@@ -647,7 +648,7 @@ MM_Scavenger::mergeHotFieldStats(MM_EnvironmentStandard *env)
  * Clear any global stats associated to the scavenger.
  */
 void
-MM_Scavenger::clearGCStats(MM_EnvironmentStandard *env)
+MM_Scavenger::clearGCStats(MM_EnvironmentBase *env)
 {
 	_extensions->scavengerStats.clear();
 }
@@ -656,7 +657,7 @@ MM_Scavenger::clearGCStats(MM_EnvironmentStandard *env)
  * Merge the current threads scavenge stats into the global scavenge stats.
  */
 void
-MM_Scavenger::mergeGCStats(MM_EnvironmentStandard *env)
+MM_Scavenger::mergeGCStats(MM_EnvironmentBase *env)
 {
 	OMRPORT_ACCESS_FROM_OMRVM(_omrVM);
 
@@ -3477,19 +3478,12 @@ MM_Scavenger::masterThreadGarbageCollect(MM_EnvironmentBase *envBase, MM_Allocat
 		}
 
 		reportGCCycleStart(env);
+		masterSetupForGC(env);
 	}
 	reportGCStart(env);
 	reportGCIncrementStart(env);
-#if defined(OMR_GC_CONCURRENT_SCAVENGER)
-	if (!isConcurrentInProgress())
-#endif
-	{
-		reportScavengeStart(env);
-
-		_extensions->scavengerStats._startTime = omrtime_hires_clock();
-
-		masterSetupForGC(env);
-	}
+	reportScavengeStart(env);
+	_extensions->scavengerStats._startTime = omrtime_hires_clock();
 
 #if defined(OMR_GC_CONCURRENT_SCAVENGER)
 	if (_extensions->concurrentScavenger) {
@@ -3500,18 +3494,18 @@ MM_Scavenger::masterThreadGarbageCollect(MM_EnvironmentBase *envBase, MM_Allocat
 		scavenge(env);
 	}
 
+	_extensions->scavengerStats._endTime = omrtime_hires_clock();
 #if defined(OMR_GC_CONCURRENT_SCAVENGER)
-	if (!isConcurrentInProgress())
+	if (isConcurrentInProgress()) {
+		reportScavengeEnd(env, false);
+	} else
 #endif
 	{
+		reportScavengeEnd(env, true);
+
 		/* defer to collector language interface */
 		_cli->scavenger_masterThreadGarbageCollect_scavengeComplete(env);
 
-		/* Record the completion time of the scavenge */
-		_extensions->scavengerStats._endTime = omrtime_hires_clock();
-
-		reportScavengeEnd(env);
-
 		/* Reset the resizable flag of the semi space.
 		 * NOTE: Must be done before we attempt to resize the new space.
 		 */
@@ -3786,19 +3780,28 @@ MM_Scavenger::getCollectorExpandSize(MM_EnvironmentBase *env)
 void
 MM_Scavenger::internalPreCollect(MM_EnvironmentBase *env, MM_MemorySubSpace *subSpace, MM_AllocateDescription *allocDescription, uint32_t gcCode)
 {
-	_cycleState = MM_CycleState();
 	env->_cycleState = &_cycleState;
-	env->_cycleState->_gcCode = MM_GCCode(gcCode);
-	env->_cycleState->_type = _cycleType;
-	env->_cycleState->_collectionStatistics = &_collectionStatistics;
 
-	/* If we are in an excessiveGC level beyond normal then an aggressive GC is
-	 * conducted to free up as much space as possible
+#if defined(OMR_GC_CONCURRENT_SCAVENGER)
+	/* Cycle state is initialized only once at the beginning of a cycle. We do not want, in mid-end cycle phases, to reset some members
+	 * that are initialized at the beginning (such as verboseContextID).
 	 */
-	if (!env->_cycleState->_gcCode.isExplicitGC()) {
-		if (excessive_gc_normal != _extensions->excessiveGCLevel) {
-			/* convert the current mode to excessive GC mode */
-			env->_cycleState->_gcCode = MM_GCCode(J9MMCONSTANT_IMPLICIT_GC_EXCESSIVE);
+	if (!isConcurrentInProgress())
+#endif
+	{
+		_cycleState = MM_CycleState();
+		_cycleState._gcCode = MM_GCCode(gcCode);
+		_cycleState._type = _cycleType;
+		_cycleState._collectionStatistics = &_collectionStatistics;
+
+		/* If we are in an excessiveGC level beyond normal then an aggressive GC is
+		 * conducted to free up as much space as possible
+		 */
+		if (!_cycleState._gcCode.isExplicitGC()) {
+			if (excessive_gc_normal != _extensions->excessiveGCLevel) {
+				/* convert the current mode to excessive GC mode */
+				_cycleState._gcCode = MM_GCCode(J9MMCONSTANT_IMPLICIT_GC_EXCESSIVE);
+			}
 		}
 	}
 
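The guard added above exists because some _cycleState members must survive across the increments of one concurrent cycle. A self-contained sketch of the pattern with simplified stand-in types (CycleState and the concurrentInProgress flag mirror, but are not, the real MM_CycleState and isConcurrentInProgress()):

struct CycleState {
	uintptr_t _verboseContextID; /* assigned once, when the cycle start is reported */
	/* ...other per-cycle members... */
};

void preCollectSketch(bool concurrentInProgress, CycleState &cycleState)
{
	if (!concurrentInProgress) {
		/* only the first (cycle-starting) increment re-initializes */
		cycleState = CycleState();
	}
	/* an unconditional reset here would wipe _verboseContextID mid-cycle */
}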
@@ -3814,7 +3817,7 @@ MM_Scavenger::internalPostCollect(MM_EnvironmentBase *env, MM_MemorySubSpace *su
 {
 	calcGCStats((MM_EnvironmentStandard*)env);
 
-	return;
+	Assert_MM_true(env->_cycleState == &_cycleState);
 }
 
 /**
@@ -4535,6 +4538,8 @@ MM_Scavenger::scavengeComplete(MM_EnvironmentBase *envBase)
 
 	Assert_MM_true(concurrent_state_complete == _concurrentState);
 
+	clearGCStats(env);
+
 	GC_OMRVMThreadListIterator threadIterator(_extensions->getOmrVM());
 	OMR_VMThread *walkThread = NULL;
 
@@ -4597,7 +4602,7 @@ MM_Scavenger::scavengeIncremental(MM_EnvironmentBase *env)
 	case concurrent_state_scan:
 	{
 		/* This is just for corner cases that must be run in STW mode.
-		 * Default main scan phase is done by scavengeConcurrent. */
+		 * Default main scan phase is done within masterThreadConcurrentCollect. */
 
 		timeout = scavengeScan(env);
 
@@ -4639,24 +4644,34 @@ MM_Scavenger::workThreadProcessRoots(MM_EnvironmentStandard *env)
 	rootScanner.scavengeRememberedSet(env);
 
 	rootScanner.scanRoots(env);
+
+	mergeGCStats(env);
 }
 
 void
 MM_Scavenger::workThreadScan(MM_EnvironmentStandard *env)
 {
+	/* Clear thread local stats */
+	env->_scavengerStats.clear();
+
 	completeScan(env);
 	// todo: are these two steps really necessary?
 	// we probably have to clear all things for master since it'll be doing final release/clear on behalf of mutator threads
 	// but is it really needed for slaves as well?
 	addCopyCachesToFreeList(env);
 	abandonTLHRemainders(env);
+
+	mergeGCStats(env);
 }
 
 void
 MM_Scavenger::workThreadComplete(MM_EnvironmentStandard *env)
 {
 	Assert_MM_true(_extensions->concurrentScavenger);
 
+	/* Clear thread local stats */
+	env->_scavengerStats.clear();
+
 	MM_ScavengerRootScanner rootScanner(env, this);
 
 	/* Complete scan loop regardless if we already aborted. If so, the scan operation will just fix up pointers that still point to forwarded objects.
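Taken together, the additions above give each increment a clear/merge pair: a worker clears its thread-local stats when its phase begins and merges them into the global stats when it ends, so every increment is reported with consistent numbers. A minimal sketch of that protocol, where doIncrementWork() is a hypothetical stand-in for the phase body:

void incrementBody(MM_EnvironmentStandard *env, MM_Scavenger *scavenger)
{
	env->_scavengerStats.clear();  /* thread-local stats cover this increment only */
	doIncrementWork(env);          /* hypothetical: the scan/complete work of the phase */
	scavenger->mergeGCStats(env);  /* publish into the global scavengerStats */
}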
@@ -4703,46 +4718,62 @@ MM_Scavenger::workThreadComplete(MM_EnvironmentStandard *env)
 }
 
 uintptr_t
-MM_Scavenger::scavengeConcurrent(MM_EnvironmentBase *env, UDATA totalBytesToScavenge, volatile bool *forceExit)
+MM_Scavenger::masterThreadConcurrentCollect(MM_EnvironmentBase *env)
 {
 	Assert_MM_true(concurrent_state_scan == _concurrentState);
 
-	MM_ConcurrentScavengeTask scavengeTask(env, _dispatcher, this, MM_ConcurrentScavengeTask::SCAVENGE_SCAN, totalBytesToScavenge, forceExit, env->_cycleState);
+	clearGCStats(env);
+
+	MM_ConcurrentScavengeTask scavengeTask(env, _dispatcher, this, MM_ConcurrentScavengeTask::SCAVENGE_SCAN, UDATA_MAX, &_forceConcurrentTermination, env->_cycleState);
 	/* Concurrent background task will run with different (typically lower) number of threads. */
 	_dispatcher->run(env, &scavengeTask, _extensions->concurrentScavengerBackgroundThreads);
 
-	uintptr_t bytesScanned = scavengeTask.getBytesScanned();
 	/* we can't assert the work queue is empty. some mutator threads could have just flushed their copy caches, after the task terminated */
 	_concurrentState = concurrent_state_complete;
 	/* make allocate space non-allocatable to trigger the final GC phase */
 	_activeSubSpace->flip(env, MM_MemorySubSpaceSemiSpace::disable_allocation);
 
-	return bytesScanned;
+	/* return the number of bytes scanned since the caller needs to pass it into postConcurrentUpdateStatsAndReport for stats reporting */
+	return scavengeTask.getBytesScanned();
 }
 
-uintptr_t
-MM_Scavenger::masterThreadConcurrentCollect(MM_EnvironmentBase *env)
+void MM_Scavenger::preConcurrentInitializeStatsAndReport(MM_EnvironmentBase *env, MM_ConcurrentPhaseStatsBase *stats)
 {
-	/* note that we can't check isConcurrentWorkAvailable at this point since another thread could have set _forceConcurrentTermination since the
-	 * master thread calls this outside of the control monitor
-	 */
+	OMRPORT_ACCESS_FROM_OMRPORT(env->getPortLibrary());
 	Assert_MM_true(NULL == env->_cycleState);
-	Assert_MM_true(concurrent_state_scan == _concurrentState);
-
 	env->_cycleState = &_cycleState;
 
-	/* We pass a pointer to _forceConcurrentTermination so that we can cause the concurrent to terminate early by setting the
-	 * flag to true if we want to interrupt it so that the master thread returns to the control mutex in order to receive a
-	 * new GC request.
-	 */
-	uintptr_t bytesConcurrentlyScanned = scavengeConcurrent(env, 100000, &_forceConcurrentTermination);
+	stats->_cycleID = _cycleState._verboseContextID;
 
-	env->_cycleState = NULL;
+	TRIGGER_J9HOOK_MM_PRIVATE_CONCURRENT_PHASE_START(
+		_extensions->privateHookInterface,
+		env->getOmrVMThread(),
+		omrtime_hires_clock(),
+		J9HOOK_MM_PRIVATE_CONCURRENT_PHASE_START,
+		stats);
 
-	/* return the number of bytes scanned since the caller needs to pass it into postConcurrentUpdateStatsAndReport for stats reporting */
-	return bytesConcurrentlyScanned;
+	_extensions->scavengerStats._startTime = omrtime_hires_clock();
 }
 
+void MM_Scavenger::postConcurrentUpdateStatsAndReport(MM_EnvironmentBase *env, MM_ConcurrentPhaseStatsBase *stats, UDATA bytesConcurrentlyScanned)
+{
+	OMRPORT_ACCESS_FROM_OMRPORT(env->getPortLibrary());
+
+	stats->_terminationWasRequested = _forceConcurrentTermination;
+
+	_extensions->scavengerStats._endTime = omrtime_hires_clock();
+
+	TRIGGER_J9HOOK_MM_PRIVATE_CONCURRENT_PHASE_END(
+		_extensions->privateHookInterface,
+		env->getOmrVMThread(),
+		omrtime_hires_clock(),
+		J9HOOK_MM_PRIVATE_CONCURRENT_PHASE_END,
+		stats);
+
+	env->_cycleState = NULL;
+}
+
+
 void
 MM_Scavenger::switchConcurrentForThread(MM_EnvironmentBase *env)
 {
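The two new helpers are meant to bracket the concurrent phase. The caller is not shown in this hunk, but the diff's own comment about forwarding the scanned-byte count suggests a pairing roughly like the following (the stats instance's placement and the surrounding driver are assumptions):

MM_ConcurrentPhaseStatsBase stats;
preConcurrentInitializeStatsAndReport(env, &stats);            /* phase-start hook, stamps _startTime */
uintptr_t bytesScanned = masterThreadConcurrentCollect(env);   /* runs the background scavenge task */
postConcurrentUpdateStatsAndReport(env, &stats, bytesScanned); /* phase-end hook, clears env->_cycleState */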