110
111 char* heap_address;
112 ReservedSpace heap_rs;
113
114 size_t heap_alignment = collector_policy()->heap_alignment();
115
116 heap_address = allocate(heap_alignment, &heap_rs);
117
118 if (!heap_rs.is_reserved()) {
119 vm_shutdown_during_initialization(
120 "Could not reserve enough space for object heap");
121 return JNI_ENOMEM;
122 }
123
124 initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
125
126 _rem_set = collector_policy()->create_rem_set(reserved_region());
127 set_barrier_set(rem_set()->bs());
128
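// The single reservation is carved up address-ordered below:
//
//   [ young_rs | old_rs ]      low -> high addresses
//
// The young generation takes the low end and the old generation the
// remainder; is_in_young() later relies on this ordering.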
129 ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
130 _young_gen = gen_policy()->young_gen_spec()->init(young_rs, 0, rem_set());
131 heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());
132
133 ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
134 _old_gen = gen_policy()->old_gen_spec()->init(old_rs, 1, rem_set());
135 clear_incremental_collection_failed();
136
137 #if INCLUDE_ALL_GCS
138 // If we are running CMS, create the collector responsible
139 // for collecting the CMS generations.
140 if (collector_policy()->is_concurrent_mark_sweep_policy()) {
141 bool success = create_cms_collector();
142 if (!success) return JNI_ENOMEM;
143 }
144 #endif // INCLUDE_ALL_GCS
145
146 return JNI_OK;
147 }
148
149 char* GenCollectedHeap::allocate(size_t alignment,
150 ReservedSpace* heap_rs) {
151 // Now figure out the total size.
152 const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
153 assert(alignment % pageSize == 0, "Must be");
154
185
186 policy->initialize_size_policy(def_new_gen->eden()->capacity(),
187 _old_gen->capacity(),
188 def_new_gen->from()->capacity());
189 policy->initialize_gc_policy_counters();
190 }
191
192 void GenCollectedHeap::ref_processing_init() {
193 _young_gen->ref_processor_init();
194 _old_gen->ref_processor_init();
195 }
196
197 size_t GenCollectedHeap::capacity() const {
198 return _young_gen->capacity() + _old_gen->capacity();
199 }
200
201 size_t GenCollectedHeap::used() const {
202 return _young_gen->used() + _old_gen->used();
203 }
204
205 // Save the "used_region" of each generation at "level" and below.
206 void GenCollectedHeap::save_used_regions(int level) {
207 assert(level == 0 || level == 1, "Illegal level parameter");
208 if (level == 1) {
209 _old_gen->save_used_region();
210 }
211 _young_gen->save_used_region();
212 }
213
214 size_t GenCollectedHeap::max_capacity() const {
215 return _young_gen->max_capacity() + _old_gen->max_capacity();
216 }
217
218 // Update the _full_collections_completed counter
219 // at the end of a stop-world full GC.
220 unsigned int GenCollectedHeap::update_full_collections_completed() {
221 MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
222 assert(_full_collections_completed <= _total_full_collections,
223 "Can't complete more collections than were started");
224 _full_collections_completed = _total_full_collections;
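// Wake any threads waiting on FullGCCount_lock (e.g. a concurrent
// collector coordinating with a foreground full GC) so they can
// re-check the completion count.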
225 ml.notify_all();
226 return _full_collections_completed;
227 }
228
229 // Update the _full_collections_completed counter, as appropriate,
230 // at the end of a concurrent GC cycle. Note the conditional update
313 bool is_tlab, bool run_verification, bool clear_soft_refs,
314 bool restore_marks_for_biased_locking) {
315 // Timer for individual generations. Last argument is false: no CR
316 // FIXME: We should try to start the timing earlier to cover more of the GC pause
317 // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
318 // so we can assume here that the next GC id is what we want.
319 GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL, GCId::peek());
320 TraceCollectorStats tcs(gen->counters());
321 TraceMemoryManagerStats tmms(gen->kind(), gc_cause());
322
323 size_t prev_used = gen->used();
324 gen->stat_record()->invocations++;
325 gen->stat_record()->accumulated_time.start();
326
327 // Must be done anew before each collection because
328 // a previous collection will do mangling and will
329 // change the top of some spaces.
330 record_gen_tops_before_GC();
331
332 if (PrintGC && Verbose) {
333 gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
334 gen->level(),
335 gen->stat_record()->invocations,
336 size * HeapWordSize);
337 }
338
339 if (run_verification && VerifyBeforeGC) {
340 HandleMark hm; // Discard invalid handles created during verification
341 Universe::verify(" VerifyBeforeGC:");
342 }
343 COMPILER2_PRESENT(DerivedPointerTable::clear());
344
345 if (restore_marks_for_biased_locking) {
346 // We perform this mark word preservation work lazily
347 // because it's only at this point that we know whether we
348 // absolutely have to do it; we want to avoid doing it for
349 // scavenge-only collections where it's unnecessary
350 BiasedLocking::preserve_marks();
351 }
352
353 // Do collection work
354 {
375 // are guaranteed to have empty discovered ref lists.
376 if (rp->discovery_is_atomic()) {
377 rp->enable_discovery();
378 rp->setup_policy(clear_soft_refs);
379 } else {
380 // collect() below will enable discovery as appropriate
381 }
382 gen->collect(full, clear_soft_refs, size, is_tlab);
383 if (!rp->enqueuing_is_done()) {
384 rp->enqueue_discovered_references();
385 } else {
386 rp->set_enqueuing_is_done(false);
387 }
388 rp->verify_no_references_recorded();
389 }
390
391 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
392
393 gen->stat_record()->accumulated_time.stop();
394
395 update_gc_stats(gen->level(), full);
396
397 if (run_verification && VerifyAfterGC) {
398 HandleMark hm; // Discard invalid handles created during verification
399 Universe::verify(" VerifyAfterGC:");
400 }
401
402 if (PrintGCDetails) {
403 gclog_or_tty->print(":");
404 gen->print_heap_change(prev_used);
405 }
406 }
407
408 void GenCollectedHeap::do_collection(bool full,
409 bool clear_all_soft_refs,
410 size_t size,
411 bool is_tlab,
412 int max_level) {
413 ResourceMark rm;
414 DEBUG_ONLY(Thread* my_thread = Thread::current();)
415
416 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
417 assert(my_thread->is_VM_thread() ||
418 my_thread->is_ConcurrentGC_thread(),
419 "incorrect thread type capability");
420 assert(Heap_lock->is_locked(),
421 "the requesting thread should have the Heap_lock");
422 guarantee(!is_gc_active(), "collection is not reentrant");
423
424 if (GC_locker::check_active_before_gc()) {
425 return; // GC is disabled (e.g. JNI GetXXXCritical operation)
426 }
427
428 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
429 collector_policy()->should_clear_all_soft_refs();
430
431 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
432
433 const size_t metadata_prev_used = MetaspaceAux::used_bytes();
434
435 print_heap_before_gc();
436
437 {
438 FlagSetting fl(_is_gc_active, true);
439
440 bool complete = full && (max_level == 1 /* old */);
441 const char* gc_cause_prefix = complete ? "Full GC" : "GC";
442 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
443 // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
444 // so we can assume here that the next GC id is what we want.
445 GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
446
447 gc_prologue(complete);
448 increment_total_collections(complete);
449
450 size_t gch_prev_used = used();
451 bool run_verification = total_collections() >= VerifyGCStartAt;
452
453 bool prepared_for_verification = false;
454 int max_level_collected = 0;
455 bool old_collects_young = (max_level == 1) &&
456 full &&
457 _old_gen->full_collects_younger_generations();
458 if (!old_collects_young &&
459 _young_gen->should_collect(full, size, is_tlab)) {
460 if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
461 prepare_for_verify();
462 prepared_for_verification = true;
463 }
464
465 assert(!_young_gen->performs_in_place_marking(), "No young generation does in place marking");
466 collect_generation(_young_gen,
467 full,
468 size,
469 is_tlab,
470 run_verification && VerifyGCLevel <= 0,
471 do_clear_all_soft_refs,
472 false);
473
474 if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
475 size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
476 // Allocation request was met by young GC.
477 size = 0;
478 }
479 }
480
481 bool must_restore_marks_for_biased_locking = false;
482
483 if (max_level == 1 && _old_gen->should_collect(full, size, is_tlab)) {
484 if (!complete) {
485 // The full_collections increment was missed above.
486 increment_total_full_collections();
487 }
488
489 pre_full_gc_dump(NULL); // do any pre full gc dumps
490
491 if (!prepared_for_verification && run_verification &&
492 VerifyGCLevel <= 1 && VerifyBeforeGC) {
493 prepare_for_verify();
494 }
495
496 assert(_old_gen->performs_in_place_marking(), "All old generations do in place marking");
497 collect_generation(_old_gen,
498 full,
499 size,
500 is_tlab,
501 run_verification && VerifyGCLevel <= 1,
502 do_clear_all_soft_refs,
503 true);
504
505 must_restore_marks_for_biased_locking = true;
506 max_level_collected = 1;
507 }
508
509 // Update "complete" boolean wrt what actually transpired --
510 // for instance, a promotion failure could have led to
511 // a whole heap collection.
512 complete = complete || (max_level_collected == 1 /* old */);
513
514 if (complete) { // We did a "major" collection
515 // FIXME: See comment at pre_full_gc_dump call
516 post_full_gc_dump(NULL); // do any post full gc dumps
517 }
518
519 if (PrintGCDetails) {
520 print_heap_change(gch_prev_used);
521
522 // Print metaspace info for full GC with PrintGCDetails flag.
523 if (complete) {
524 MetaspaceAux::print_metaspace_change(metadata_prev_used);
525 }
526 }
527
528 // Adjust generation sizes.
529 if (max_level_collected == 1 /* old */) {
530 _old_gen->compute_new_size();
531 }
532 _young_gen->compute_new_size();
533
534 if (complete) {
535 // Delete metaspaces for unloaded class loaders and clean up loader_data graph
536 ClassLoaderDataGraph::purge();
537 MetaspaceAux::verify_metrics();
538 // Resize the metaspace capacity after full collections
539 MetaspaceGC::compute_new_size();
540 update_full_collections_completed();
541 }
542
543 // Track memory usage and detect low memory after GC finishes
544 MemoryService::track_memory_usage();
545
546 gc_epilogue(complete);
547
548 if (must_restore_marks_for_biased_locking) {
549 BiasedLocking::restore_marks();
652 assert(code_roots != NULL, "must supply closure for code cache");
653
654 // We only visit parts of the CodeCache when scavenging.
655 CodeCache::scavenge_root_nmethods_do(code_roots);
656 }
657 if (so & SO_AllCodeCache) {
658 assert(code_roots != NULL, "must supply closure for code cache");
659
660 // CMSCollector uses this to do intermediate-strength collections.
661 // We scan the entire code cache, since CodeCache::do_unloading is not called.
662 CodeCache::blobs_do(code_roots);
663 }
664 // Verify that the code cache contents are not subject to
665 // movement by a scavenging collection.
666 DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
667 DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
668 }
669
670 }
671
672 void GenCollectedHeap::gen_process_roots(int level,
673 bool younger_gens_as_roots,
674 bool activate_scope,
675 ScanningOption so,
676 bool only_strong_roots,
677 OopsInGenClosure* not_older_gens,
678 OopsInGenClosure* older_gens,
679 CLDClosure* cld_closure) {
680 const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
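// The pointer-adjustment phase of a full collection is recognized here by
// its argument combination: it visits weak roots but does not treat
// younger generations as roots.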
681
682 bool is_moving_collection = false;
683 if (level == 0 || is_adjust_phase) {
684 // young collections are always moving
685 is_moving_collection = true;
686 }
687
688 MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
689 OopsInGenClosure* weak_roots = only_strong_roots ? NULL : not_older_gens;
690 CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;
691
692 process_roots(activate_scope, so,
693 not_older_gens, weak_roots,
694 cld_closure, weak_cld_closure,
695 &mark_code_closure);
696
697 if (younger_gens_as_roots) {
698 if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
699 if (level == 1) {
700 not_older_gens->set_generation(_young_gen);
701 _young_gen->oop_iterate(not_older_gens);
702 }
703 not_older_gens->reset_generation();
704 }
705 }
706 // When collection is parallel, all threads get to cooperate to do
707 // older-gen scanning.
708 if (level == 0) {
709 older_gens->set_generation(_old_gen);
710 rem_set()->younger_refs_iterate(_old_gen, older_gens);
711 older_gens->reset_generation();
712 }
713
714 _process_strong_tasks->all_tasks_completed();
715 }
716
717
718 class AlwaysTrueClosure: public BoolObjectClosure {
719 public:
720 bool do_object_b(oop p) { return true; }
721 };
722 static AlwaysTrueClosure always_true;
723
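// The weak roots handled here are the weak JNI handles (visited
// unconditionally, since always_true reports every referent live) and any
// oops still held by the two generations' reference processors.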
724 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
725 JNIHandles::weak_oops_do(&always_true, root_closure);
726 _young_gen->ref_processor()->weak_oops_do(root_closure);
727 _old_gen->ref_processor()->weak_oops_do(root_closure);
728 }
729
730 #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \
731 void GenCollectedHeap:: \
732 oop_since_save_marks_iterate(int level, \
733 OopClosureType* cur, \
734 OopClosureType* older) { \
735 if (level == 0) { \
736 _young_gen->oop_since_save_marks_iterate##nv_suffix(cur); \
737 _old_gen->oop_since_save_marks_iterate##nv_suffix(older); \
738 } else { \
739 _old_gen->oop_since_save_marks_iterate##nv_suffix(cur); \
740 } \
741 }
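// ALL_SINCE_SAVE_MARKS_CLOSURES applies the definition above to each
// specialized closure type. For a hypothetical pair (ScanClosure, _nv)
// it would expand to roughly:
//
//   void GenCollectedHeap::oop_since_save_marks_iterate(
//       int level, ScanClosure* cur, ScanClosure* older) {
//     if (level == 0) {
//       _young_gen->oop_since_save_marks_iterate_nv(cur);
//       _old_gen->oop_since_save_marks_iterate_nv(older);
//     } else {
//       _old_gen->oop_since_save_marks_iterate_nv(cur);
//     }
//   }
//
// giving each closure a statically dispatched ("_nv", non-virtual) path.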
742
743 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
744
745 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
746
747 bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
748 if (level == 0 && !_young_gen->no_allocs_since_save_marks()) {
749 return false;
750 }
751 return _old_gen->no_allocs_since_save_marks();
752 }
753
754 bool GenCollectedHeap::supports_inline_contig_alloc() const {
755 return _young_gen->supports_inline_contig_alloc();
756 }
757
758 HeapWord** GenCollectedHeap::top_addr() const {
759 return _young_gen->top_addr();
760 }
761
762 HeapWord** GenCollectedHeap::end_addr() const {
763 return _young_gen->end_addr();
764 }
765
766 // public collection interfaces
767
768 void GenCollectedHeap::collect(GCCause::Cause cause) {
769 if (should_do_concurrent_full_gc(cause)) {
770 #if INCLUDE_ALL_GCS
771 // mostly concurrent full collection
772 collect_mostly_concurrent(cause);
773 #else // INCLUDE_ALL_GCS
774 ShouldNotReachHere();
775 #endif // INCLUDE_ALL_GCS
776 } else if (cause == GCCause::_wb_young_gc) {
777 // minor collection for WhiteBox API
778 collect(cause, 0 /* young */);
779 } else {
780 #ifdef ASSERT
781 if (cause == GCCause::_scavenge_alot) {
782 // minor collection only
783 collect(cause, 0 /* young */);
784 } else {
785 // Stop-the-world full collection
786 collect(cause, 1 /* old */);
787 }
788 #else
789 // Stop-the-world full collection
790 collect(cause, 1 /* old */);
791 #endif
792 }
793 }
794
795 void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
796 // The caller doesn't have the Heap_lock
797 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
798 MutexLocker ml(Heap_lock);
799 collect_locked(cause, max_level);
800 }
801
802 void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
803 // The caller has the Heap_lock
804 assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
805 collect_locked(cause, 1 /* old */);
806 }
807
808 // This is the private collection interface.
809 // The Heap_lock is expected to be held on entry.
810
811 void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
812 // Read the GC count while holding the Heap_lock
813 unsigned int gc_count_before = total_collections();
814 unsigned int full_gc_count_before = total_full_collections();
815 {
816 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
817 VM_GenCollectFull op(gc_count_before, full_gc_count_before,
818 cause, max_level);
819 VMThread::execute(&op);
820 }
821 }
822
823 #if INCLUDE_ALL_GCS
824 bool GenCollectedHeap::create_cms_collector() {
825
826 assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
827 "Unexpected generation kinds");
828 // Skip two header words in the block content verification
829 NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
830 CMSCollector* collector = new CMSCollector(
831 (ConcurrentMarkSweepGeneration*)_old_gen,
832 _rem_set->as_CardTableRS(),
833 (ConcurrentMarkSweepPolicy*) collector_policy());
834
835 if (collector == NULL || !collector->completed_initialization()) {
836 if (collector) {
837 delete collector; // Be nice in embedded situations
838 }
841 }
842 return true; // success
843 }
844
845 void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
846 assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
847
848 MutexLocker ml(Heap_lock);
849 // Read the GC counts while holding the Heap_lock
850 unsigned int full_gc_count_before = total_full_collections();
851 unsigned int gc_count_before = total_collections();
852 {
853 MutexUnlocker mu(Heap_lock);
854 VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
855 VMThread::execute(&op);
856 }
857 }
858 #endif // INCLUDE_ALL_GCS
859
860 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
861 do_full_collection(clear_all_soft_refs, 1 /* old */);
862 }
863
864 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
865 int max_level) {
866 int local_max_level;
867 if (!incremental_collection_will_fail(false /* don't consult_young */) &&
868 gc_cause() == GCCause::_gc_locker) {
869 local_max_level = 0;
870 } else {
871 local_max_level = max_level;
872 }
873
874 do_collection(true /* full */,
875 clear_all_soft_refs /* clear_all_soft_refs */,
876 0 /* size */,
877 false /* is_tlab */,
878 local_max_level /* max_level */);
879 // Hack XXX FIX ME !!!
880 // A scavenge may not have been attempted, or may have
881 // been attempted and failed, because the old gen was too full
882 if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
883 incremental_collection_will_fail(false /* don't consult_young */)) {
884 if (PrintGCDetails) {
885 gclog_or_tty->print_cr("GC locker: Trying a full collection "
886 "because scavenge failed");
887 }
888 // This time allow the old gen to be collected as well
889 do_collection(true /* full */,
890 clear_all_soft_refs /* clear_all_soft_refs */,
891 0 /* size */,
892 false /* is_tlab */,
893 1 /* old */ /* max_level */);
894 }
895 }
896
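// Depends on the address-ordered layout established in initialize():
// everything below the old generation's reserved start is young.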
897 bool GenCollectedHeap::is_in_young(oop p) {
898 bool result = ((HeapWord*)p) < _old_gen->reserved().start();
899 assert(result == _young_gen->is_in_reserved(p),
900 err_msg("incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p)));
901 return result;
902 }
903
904 // Returns "TRUE" iff "p" points into the committed areas of the heap.
905 bool GenCollectedHeap::is_in(const void* p) const {
906 return _young_gen->is_in(p) || _old_gen->is_in(p);
907 }
908
909 #ifdef ASSERT
910 // Don't implement this by using is_in_young(). This method is used
911 // in some cases to check that is_in_young() is correct.
912 bool GenCollectedHeap::is_in_partial_collection(const void* p) {
913 assert(is_in_reserved(p) || p == NULL,
1096
1097 void GenCollectedHeap::save_marks() {
1098 _young_gen->save_marks();
1099 _old_gen->save_marks();
1100 }
1101
1102 GenCollectedHeap* GenCollectedHeap::heap() {
1103 CollectedHeap* heap = Universe::heap();
1104 assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
1105 assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap");
1106 return (GenCollectedHeap*)heap;
1107 }
1108
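// A single CompactPoint is threaded through both generations: the old
// generation first compacts into its own space, and the young generation
// then continues from the resulting compact point, so young survivors can
// be moved into the old generation where space allows.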
1109 void GenCollectedHeap::prepare_for_compaction() {
1110 // Start by compacting into same gen.
1111 CompactPoint cp(_old_gen);
1112 _old_gen->prepare_for_compaction(&cp);
1113 _young_gen->prepare_for_compaction(&cp);
1114 }
1115
1116 GCStats* GenCollectedHeap::gc_stats(int level) const {
1117 if (level == 0) {
1118 return _young_gen->gc_stats();
1119 } else {
1120 return _old_gen->gc_stats();
1121 }
1122 }
1123
1124 void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
1125 if (!silent) {
1126 gclog_or_tty->print("%s", _old_gen->name());
1127 gclog_or_tty->print(" ");
1128 }
1129 _old_gen->verify();
1130
1131 if (!silent) {
1132 gclog_or_tty->print("%s", _young_gen->name());
1133 gclog_or_tty->print(" ");
1134 }
1135 _young_gen->verify();
1136
1137 if (!silent) {
1138 gclog_or_tty->print("remset ");
1139 }
1140 rem_set()->verify();
1141 }
1271 }
1272 }
1273 #endif // not PRODUCT
1274
1275 class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
1276 public:
1277 void do_generation(Generation* gen) {
1278 gen->ensure_parsability();
1279 }
1280 };
1281
1282 void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
1283 CollectedHeap::ensure_parsability(retire_tlabs);
1284 GenEnsureParsabilityClosure ep_cl;
1285 generation_iterate(&ep_cl, false);
1286 }
1287
1288 oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
1289 oop obj,
1290 size_t obj_size) {
1291 guarantee(old_gen->level() == 1, "We only get here with an old generation");
1292 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1293 HeapWord* result = NULL;
1294
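// Last resort: try to expand the old generation and allocate the object
// there before reporting the promotion failure to the caller.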
1295 result = old_gen->expand_and_allocate(obj_size, false);
1296
1297 if (result != NULL) {
1298 Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
1299 }
1300 return oop(result);
1301 }
1302
1303 class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
1304 jlong _time; // in ms
1305 jlong _now; // in ms
1306
1307 public:
1308 GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }
1309
1310 jlong time() { return _time; }
1311
|
110
111 char* heap_address;
112 ReservedSpace heap_rs;
113
114 size_t heap_alignment = collector_policy()->heap_alignment();
115
116 heap_address = allocate(heap_alignment, &heap_rs);
117
118 if (!heap_rs.is_reserved()) {
119 vm_shutdown_during_initialization(
120 "Could not reserve enough space for object heap");
121 return JNI_ENOMEM;
122 }
123
124 initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
125
126 _rem_set = collector_policy()->create_rem_set(reserved_region());
127 set_barrier_set(rem_set()->bs());
128
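// The single reservation is carved up address-ordered below:
//
//   [ young_rs | old_rs ]      low -> high addresses
//
// The young generation takes the low end and the old generation the
// remainder; is_in_young() later relies on this ordering.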
129 ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
130 _young_gen = gen_policy()->young_gen_spec()->init(young_rs, rem_set());
131 heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());
132
133 ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
134 _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set());
135 clear_incremental_collection_failed();
136
137 #if INCLUDE_ALL_GCS
138 // If we are running CMS, create the collector responsible
139 // for collecting the CMS generations.
140 if (collector_policy()->is_concurrent_mark_sweep_policy()) {
141 bool success = create_cms_collector();
142 if (!success) return JNI_ENOMEM;
143 }
144 #endif // INCLUDE_ALL_GCS
145
146 return JNI_OK;
147 }
148
149 char* GenCollectedHeap::allocate(size_t alignment,
150 ReservedSpace* heap_rs) {
151 // Now figure out the total size.
152 const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
153 assert(alignment % pageSize == 0, "Must be");
154
185
186 policy->initialize_size_policy(def_new_gen->eden()->capacity(),
187 _old_gen->capacity(),
188 def_new_gen->from()->capacity());
189 policy->initialize_gc_policy_counters();
190 }
191
192 void GenCollectedHeap::ref_processing_init() {
193 _young_gen->ref_processor_init();
194 _old_gen->ref_processor_init();
195 }
196
197 size_t GenCollectedHeap::capacity() const {
198 return _young_gen->capacity() + _old_gen->capacity();
199 }
200
201 size_t GenCollectedHeap::used() const {
202 return _young_gen->used() + _old_gen->used();
203 }
204
205 void GenCollectedHeap::save_used_regions() {
206 _old_gen->save_used_region();
207 _young_gen->save_used_region();
208 }
209
210 size_t GenCollectedHeap::max_capacity() const {
211 return _young_gen->max_capacity() + _old_gen->max_capacity();
212 }
213
214 // Update the _full_collections_completed counter
215 // at the end of a stop-world full GC.
216 unsigned int GenCollectedHeap::update_full_collections_completed() {
217 MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
218 assert(_full_collections_completed <= _total_full_collections,
219 "Can't complete more collections than were started");
220 _full_collections_completed = _total_full_collections;
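// Wake any threads waiting on FullGCCount_lock (e.g. a concurrent
// collector coordinating with a foreground full GC) so they can
// re-check the completion count.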
221 ml.notify_all();
222 return _full_collections_completed;
223 }
224
225 // Update the _full_collections_completed counter, as appropriate,
226 // at the end of a concurrent GC cycle. Note the conditional update
309 bool is_tlab, bool run_verification, bool clear_soft_refs,
310 bool restore_marks_for_biased_locking) {
311 // Timer for individual generations. Last argument is false: no CR
312 // FIXME: We should try to start the timing earlier to cover more of the GC pause
313 // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
314 // so we can assume here that the next GC id is what we want.
315 GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL, GCId::peek());
316 TraceCollectorStats tcs(gen->counters());
317 TraceMemoryManagerStats tmms(gen->kind(), gc_cause());
318
319 size_t prev_used = gen->used();
320 gen->stat_record()->invocations++;
321 gen->stat_record()->accumulated_time.start();
322
323 // Must be done anew before each collection because
324 // a previous collection will do mangling and will
325 // change the top of some spaces.
326 record_gen_tops_before_GC();
327
328 if (PrintGC && Verbose) {
329 // The logging is kept as it was under the level-based scheme; it could
330 // be updated to report young/old instead of 0/1.
331 uint level;
332 if (gen == GenCollectedHeap::heap()->young_gen()) {
333 level = 0;
334 } else {
335 level = 1;
336 }
337 gclog_or_tty->print("level=%u invoke=%d size=" SIZE_FORMAT,
338 level,
339 gen->stat_record()->invocations,
340 size * HeapWordSize);
341 }
342
343 if (run_verification && VerifyBeforeGC) {
344 HandleMark hm; // Discard invalid handles created during verification
345 Universe::verify(" VerifyBeforeGC:");
346 }
347 COMPILER2_PRESENT(DerivedPointerTable::clear());
348
349 if (restore_marks_for_biased_locking) {
350 // We perform this mark word preservation work lazily
351 // because it's only at this point that we know whether we
352 // absolutely have to do it; we want to avoid doing it for
353 // scavenge-only collections where it's unnecessary
354 BiasedLocking::preserve_marks();
355 }
356
357 // Do collection work
358 {
379 // are guaranteed to have empty discovered ref lists.
380 if (rp->discovery_is_atomic()) {
381 rp->enable_discovery();
382 rp->setup_policy(clear_soft_refs);
383 } else {
384 // collect() below will enable discovery as appropriate
385 }
386 gen->collect(full, clear_soft_refs, size, is_tlab);
387 if (!rp->enqueuing_is_done()) {
388 rp->enqueue_discovered_references();
389 } else {
390 rp->set_enqueuing_is_done(false);
391 }
392 rp->verify_no_references_recorded();
393 }
394
395 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
396
397 gen->stat_record()->accumulated_time.stop();
398
399 update_gc_stats(gen, full);
400
401 if (run_verification && VerifyAfterGC) {
402 HandleMark hm; // Discard invalid handles created during verification
403 Universe::verify(" VerifyAfterGC:");
404 }
405
406 if (PrintGCDetails) {
407 gclog_or_tty->print(":");
408 gen->print_heap_change(prev_used);
409 }
410 }
411
412 void GenCollectedHeap::do_collection(bool full,
413 bool clear_all_soft_refs,
414 size_t size,
415 bool is_tlab,
416 Generation::Type max_generation) {
417 ResourceMark rm;
418 DEBUG_ONLY(Thread* my_thread = Thread::current();)
419
420 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
421 assert(my_thread->is_VM_thread() ||
422 my_thread->is_ConcurrentGC_thread(),
423 "incorrect thread type capability");
424 assert(Heap_lock->is_locked(),
425 "the requesting thread should have the Heap_lock");
426 guarantee(!is_gc_active(), "collection is not reentrant");
427
428 if (GC_locker::check_active_before_gc()) {
429 return; // GC is disabled (e.g. JNI GetXXXCritical operation)
430 }
431
432 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
433 collector_policy()->should_clear_all_soft_refs();
434
435 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
436
437 const size_t metadata_prev_used = MetaspaceAux::used_bytes();
438
439 print_heap_before_gc();
440
441 {
442 FlagSetting fl(_is_gc_active, true);
443
444 bool complete = full && (max_generation == Generation::Old);
445 const char* gc_cause_prefix = complete ? "Full GC" : "GC";
446 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
447 // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
448 // so we can assume here that the next GC id is what we want.
449 GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
450
451 gc_prologue(complete);
452 increment_total_collections(complete);
453
454 size_t gch_prev_used = used();
455 bool run_verification = total_collections() >= VerifyGCStartAt;
456
457 bool prepared_for_verification = false;
458 bool collected_old = false;
459 bool old_collects_young = complete &&
460 _old_gen->full_collects_younger_generations();
461 if (!old_collects_young &&
462 _young_gen->should_collect(full, size, is_tlab)) {
463 if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
464 prepare_for_verify();
465 prepared_for_verification = true;
466 }
467
468 assert(!_young_gen->performs_in_place_marking(), "No young generation does in place marking");
469 collect_generation(_young_gen,
470 full,
471 size,
472 is_tlab,
473 run_verification && VerifyGCLevel <= 0,
474 do_clear_all_soft_refs,
475 false);
476
477 if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
478 size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
479 // Allocation request was met by young GC.
480 size = 0;
481 }
482 }
483
484 bool must_restore_marks_for_biased_locking = false;
485
486 if (max_generation == Generation::Old && _old_gen->should_collect(full, size, is_tlab)) {
487 if (!complete) {
488 // The full_collections increment was missed above.
489 increment_total_full_collections();
490 }
491
492 pre_full_gc_dump(NULL); // do any pre full gc dumps
493
494 if (!prepared_for_verification && run_verification &&
495 VerifyGCLevel <= 1 && VerifyBeforeGC) {
496 prepare_for_verify();
497 }
498
499 assert(_old_gen->performs_in_place_marking(), "All old generations do in place marking");
500 collect_generation(_old_gen,
501 full,
502 size,
503 is_tlab,
504 run_verification && VerifyGCLevel <= 1,
505 do_clear_all_soft_refs,
506 true);
507
508 must_restore_marks_for_biased_locking = true;
509 collected_old = true;
510 }
511
512 // Update "complete" boolean wrt what actually transpired --
513 // for instance, a promotion failure could have led to
514 // a whole heap collection.
515 complete = complete || collected_old;
516
517 if (complete) { // We did a "major" collection
518 // FIXME: See comment at pre_full_gc_dump call
519 post_full_gc_dump(NULL); // do any post full gc dumps
520 }
521
522 if (PrintGCDetails) {
523 print_heap_change(gch_prev_used);
524
525 // Print metaspace info for full GC with PrintGCDetails flag.
526 if (complete) {
527 MetaspaceAux::print_metaspace_change(metadata_prev_used);
528 }
529 }
530
531 // Adjust generation sizes.
532 if (collected_old) {
533 _old_gen->compute_new_size();
534 }
535 _young_gen->compute_new_size();
536
537 if (complete) {
538 // Delete metaspaces for unloaded class loaders and clean up loader_data graph
539 ClassLoaderDataGraph::purge();
540 MetaspaceAux::verify_metrics();
541 // Resize the metaspace capacity after full collections
542 MetaspaceGC::compute_new_size();
543 update_full_collections_completed();
544 }
545
546 // Track memory usage and detect low memory after GC finishes
547 MemoryService::track_memory_usage();
548
549 gc_epilogue(complete);
550
551 if (must_restore_marks_for_biased_locking) {
552 BiasedLocking::restore_marks();
655 assert(code_roots != NULL, "must supply closure for code cache");
656
657 // We only visit parts of the CodeCache when scavenging.
658 CodeCache::scavenge_root_nmethods_do(code_roots);
659 }
660 if (so & SO_AllCodeCache) {
661 assert(code_roots != NULL, "must supply closure for code cache");
662
663 // CMSCollector uses this to do intermediate-strength collections.
664 // We scan the entire code cache, since CodeCache::do_unloading is not called.
665 CodeCache::blobs_do(code_roots);
666 }
667 // Verify that the code cache contents are not subject to
668 // movement by a scavenging collection.
669 DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
670 DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
671 }
672
673 }
674
675 void GenCollectedHeap::gen_process_roots(Generation::Type type,
676 bool younger_gens_as_roots,
677 bool activate_scope,
678 ScanningOption so,
679 bool only_strong_roots,
680 OopsInGenClosure* not_older_gens,
681 OopsInGenClosure* older_gens,
682 CLDClosure* cld_closure) {
683 const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
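// The pointer-adjustment phase of a full collection is recognized here by
// its argument combination: it visits weak roots but does not treat
// younger generations as roots.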
684
685 bool is_moving_collection = false;
686 if (type == Generation::Young || is_adjust_phase) {
687 // young collections are always moving
688 is_moving_collection = true;
689 }
690
691 MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
692 OopsInGenClosure* weak_roots = only_strong_roots ? NULL : not_older_gens;
693 CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;
694
695 process_roots(activate_scope, so,
696 not_older_gens, weak_roots,
697 cld_closure, weak_cld_closure,
698 &mark_code_closure);
699
700 if (younger_gens_as_roots) {
701 if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
702 if (type == Generation::Old) {
703 not_older_gens->set_generation(_young_gen);
704 _young_gen->oop_iterate(not_older_gens);
705 }
706 not_older_gens->reset_generation();
707 }
708 }
709 // When collection is parallel, all threads get to cooperate to do
710 // old generation scanning.
711 if (type == Generation::Young) {
712 older_gens->set_generation(_old_gen);
713 rem_set()->younger_refs_iterate(_old_gen, older_gens);
714 older_gens->reset_generation();
715 }
716
717 _process_strong_tasks->all_tasks_completed();
718 }
719
720
721 class AlwaysTrueClosure: public BoolObjectClosure {
722 public:
723 bool do_object_b(oop p) { return true; }
724 };
725 static AlwaysTrueClosure always_true;
726
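// The weak roots handled here are the weak JNI handles (visited
// unconditionally, since always_true reports every referent live) and any
// oops still held by the two generations' reference processors.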
727 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
728 JNIHandles::weak_oops_do(&always_true, root_closure);
729 _young_gen->ref_processor()->weak_oops_do(root_closure);
730 _old_gen->ref_processor()->weak_oops_do(root_closure);
731 }
732
733 #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \
734 void GenCollectedHeap:: \
735 oop_since_save_marks_iterate(Generation::Type gen, \
736 OopClosureType* cur, \
737 OopClosureType* older) { \
738 if (gen == Generation::Young) { \
739 _young_gen->oop_since_save_marks_iterate##nv_suffix(cur); \
740 _old_gen->oop_since_save_marks_iterate##nv_suffix(older); \
741 } else { \
742 _old_gen->oop_since_save_marks_iterate##nv_suffix(cur); \
743 } \
744 }
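// ALL_SINCE_SAVE_MARKS_CLOSURES applies the definition above to each
// specialized closure type. For a hypothetical pair (ScanClosure, _nv)
// it would expand to roughly:
//
//   void GenCollectedHeap::oop_since_save_marks_iterate(
//       Generation::Type gen, ScanClosure* cur, ScanClosure* older) {
//     if (gen == Generation::Young) {
//       _young_gen->oop_since_save_marks_iterate_nv(cur);
//       _old_gen->oop_since_save_marks_iterate_nv(older);
//     } else {
//       _old_gen->oop_since_save_marks_iterate_nv(cur);
//     }
//   }
//
// giving each closure a statically dispatched ("_nv", non-virtual) path.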
745
746 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
747
748 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
749
750 bool GenCollectedHeap::no_allocs_since_save_marks(bool include_young) {
751 if (include_young && !_young_gen->no_allocs_since_save_marks()) {
752 return false;
753 }
754 return _old_gen->no_allocs_since_save_marks();
755 }
756
757 bool GenCollectedHeap::supports_inline_contig_alloc() const {
758 return _young_gen->supports_inline_contig_alloc();
759 }
760
761 HeapWord** GenCollectedHeap::top_addr() const {
762 return _young_gen->top_addr();
763 }
764
765 HeapWord** GenCollectedHeap::end_addr() const {
766 return _young_gen->end_addr();
767 }
768
769 // public collection interfaces
770
771 void GenCollectedHeap::collect(GCCause::Cause cause) {
772 if (should_do_concurrent_full_gc(cause)) {
773 #if INCLUDE_ALL_GCS
774 // mostly concurrent full collection
775 collect_mostly_concurrent(cause);
776 #else // INCLUDE_ALL_GCS
777 ShouldNotReachHere();
778 #endif // INCLUDE_ALL_GCS
779 } else if (cause == GCCause::_wb_young_gc) {
780 // minor collection for WhiteBox API
781 collect(cause, Generation::Young);
782 } else {
783 #ifdef ASSERT
784 if (cause == GCCause::_scavenge_alot) {
785 // minor collection only
786 collect(cause, Generation::Young);
787 } else {
788 // Stop-the-world full collection
789 collect(cause, Generation::Old);
790 }
791 #else
792 // Stop-the-world full collection
793 collect(cause, Generation::Old);
794 #endif
795 }
796 }
797
798 void GenCollectedHeap::collect(GCCause::Cause cause, Generation::Type max_generation) {
799 // The caller doesn't have the Heap_lock
800 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
801 MutexLocker ml(Heap_lock);
802 collect_locked(cause, max_generation);
803 }
804
805 void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
806 // The caller has the Heap_lock
807 assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
808 collect_locked(cause, Generation::Old);
809 }
810
811 // This is the private collection interface.
812 // The Heap_lock is expected to be held on entry.
813
814 void GenCollectedHeap::collect_locked(GCCause::Cause cause, Generation::Type max_generation) {
815 // Read the GC count while holding the Heap_lock
816 unsigned int gc_count_before = total_collections();
817 unsigned int full_gc_count_before = total_full_collections();
818 {
819 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
820 VM_GenCollectFull op(gc_count_before, full_gc_count_before,
821 cause, max_generation);
822 VMThread::execute(&op);
823 }
824 }
825
826 #if INCLUDE_ALL_GCS
827 bool GenCollectedHeap::create_cms_collector() {
828
829 assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
830 "Unexpected generation kinds");
831 // Skip two header words in the block content verification
832 NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
833 CMSCollector* collector = new CMSCollector(
834 (ConcurrentMarkSweepGeneration*)_old_gen,
835 _rem_set->as_CardTableRS(),
836 (ConcurrentMarkSweepPolicy*) collector_policy());
837
838 if (collector == NULL || !collector->completed_initialization()) {
839 if (collector) {
840 delete collector; // Be nice in embedded situations
841 }
844 }
845 return true; // success
846 }
847
848 void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
849 assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
850
851 MutexLocker ml(Heap_lock);
852 // Read the GC counts while holding the Heap_lock
853 unsigned int full_gc_count_before = total_full_collections();
854 unsigned int gc_count_before = total_collections();
855 {
856 MutexUnlocker mu(Heap_lock);
857 VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
858 VMThread::execute(&op);
859 }
860 }
861 #endif // INCLUDE_ALL_GCS
862
863 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
864 do_full_collection(clear_all_soft_refs, Generation::Old);
865 }
866
867 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
868 Generation::Type last_generation) {
869 Generation::Type local_last_generation;
870 if (!incremental_collection_will_fail(false /* don't consult_young */) &&
871 gc_cause() == GCCause::_gc_locker) {
872 local_last_generation = Generation::Young;
873 } else {
874 local_last_generation = last_generation;
875 }
876
877 do_collection(true /* full */,
878 clear_all_soft_refs /* clear_all_soft_refs */,
879 0 /* size */,
880 false /* is_tlab */,
881 local_last_generation /* last_generation */);
882 // Hack XXX FIX ME !!!
883 // A scavenge may not have been attempted, or may have
884 // been attempted and failed, because the old gen was too full
885 if (local_last_generation == Generation::Young && gc_cause() == GCCause::_gc_locker &&
886 incremental_collection_will_fail(false /* don't consult_young */)) {
887 if (PrintGCDetails) {
888 gclog_or_tty->print_cr("GC locker: Trying a full collection "
889 "because scavenge failed");
890 }
891 // This time allow the old gen to be collected as well
892 do_collection(true /* full */,
893 clear_all_soft_refs /* clear_all_soft_refs */,
894 0 /* size */,
895 false /* is_tlab */,
896 Generation::Old /* last_generation */);
897 }
898 }
899
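// Depends on the address-ordered layout established in initialize():
// everything below the old generation's reserved start is young.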
900 bool GenCollectedHeap::is_in_young(oop p) {
901 bool result = ((HeapWord*)p) < _old_gen->reserved().start();
902 assert(result == _young_gen->is_in_reserved(p),
903 err_msg("incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p)));
904 return result;
905 }
906
907 // Returns "TRUE" iff "p" points into the committed areas of the heap.
908 bool GenCollectedHeap::is_in(const void* p) const {
909 return _young_gen->is_in(p) || _old_gen->is_in(p);
910 }
911
912 #ifdef ASSERT
913 // Don't implement this by using is_in_young(). This method is used
914 // in some cases to check that is_in_young() is correct.
915 bool GenCollectedHeap::is_in_partial_collection(const void* p) {
916 assert(is_in_reserved(p) || p == NULL,
1099
1100 void GenCollectedHeap::save_marks() {
1101 _young_gen->save_marks();
1102 _old_gen->save_marks();
1103 }
1104
1105 GenCollectedHeap* GenCollectedHeap::heap() {
1106 CollectedHeap* heap = Universe::heap();
1107 assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
1108 assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap");
1109 return (GenCollectedHeap*)heap;
1110 }
1111
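// A single CompactPoint is threaded through both generations: the old
// generation first compacts into its own space, and the young generation
// then continues from the resulting compact point, so young survivors can
// be moved into the old generation where space allows.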
1112 void GenCollectedHeap::prepare_for_compaction() {
1113 // Start by compacting into same gen.
1114 CompactPoint cp(_old_gen);
1115 _old_gen->prepare_for_compaction(&cp);
1116 _young_gen->prepare_for_compaction(&cp);
1117 }
1118
1119 GCStats* GenCollectedHeap::gc_stats(Generation* gen) const {
1120 return gen->gc_stats();
1121 }
1122
1123 void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
1124 if (!silent) {
1125 gclog_or_tty->print("%s", _old_gen->name());
1126 gclog_or_tty->print(" ");
1127 }
1128 _old_gen->verify();
1129
1130 if (!silent) {
1131 gclog_or_tty->print("%s", _young_gen->name());
1132 gclog_or_tty->print(" ");
1133 }
1134 _young_gen->verify();
1135
1136 if (!silent) {
1137 gclog_or_tty->print("remset ");
1138 }
1139 rem_set()->verify();
1140 }
1270 }
1271 }
1272 #endif // not PRODUCT
1273
1274 class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
1275 public:
1276 void do_generation(Generation* gen) {
1277 gen->ensure_parsability();
1278 }
1279 };
1280
1281 void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
1282 CollectedHeap::ensure_parsability(retire_tlabs);
1283 GenEnsureParsabilityClosure ep_cl;
1284 generation_iterate(&ep_cl, false);
1285 }
1286
1287 oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
1288 oop obj,
1289 size_t obj_size) {
1290 guarantee(old_gen == _old_gen, "We only get here with an old generation");
1291 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1292 HeapWord* result = NULL;
1293
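// Last resort: try to expand the old generation and allocate the object
// there before reporting the promotion failure to the caller.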
1294 result = old_gen->expand_and_allocate(obj_size, false);
1295
1296 if (result != NULL) {
1297 Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
1298 }
1299 return oop(result);
1300 }
1301
1302 class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
1303 jlong _time; // in ms
1304 jlong _now; // in ms
1305
1306 public:
1307 GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }
1308
1309 jlong time() { return _time; }
1310
|