// Try the young generation first; unless first_only is true, fall back to
// the old generation when the young generation cannot satisfy the request.
// Returns NULL if no generation can (or should) perform the allocation.
HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res = NULL;

  if (_young_gen->should_allocate(size, is_tlab)) {
    res = _young_gen->allocate(size, is_tlab);
    if (res != NULL || first_only) {
      return res;
    }
  }

  if (_old_gen->should_allocate(size, is_tlab)) {
    res = _old_gen->allocate(size, is_tlab);
  }

  return res;
}

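// Entry point for ordinary (non-TLAB) object allocation. Defers to the
// collector policy's slow path, which also reports whether the GC
// overhead limit was exceeded while satisfying the request.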
HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return gen_policy()->mem_allocate_work(size,
                                         false /* is_tlab */,
                                         gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_last_ditch_collection;
}

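// Decide whether a full collection request should instead be handled as a
// concurrent cycle. Only CMS supports this; the flags below allow
// GC-locker induced and explicitly requested collections to run
// concurrently.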
bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  if (!UseConcMarkSweepGC) {
    return false;
  }

  switch (cause) {
    case GCCause::_gc_locker:           return GCLockerInvokesConcurrent;
    case GCCause::_java_lang_system_gc:
    case GCCause::_dcmd_gc_run:         return ExplicitGCInvokesConcurrent;
    default:                            return false;
  }
}

// ... (earlier portion of GenCollectedHeap::do_collection elided)

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    gc_epilogue(complete);

    if (must_restore_marks_for_biased_locking) {
      BiasedLocking::restore_marks();
    }
  }

  print_heap_after_gc();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  return gen_policy()->satisfy_failed_allocation(size, is_tlab);
}

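// Debug-only verification closure: asserts that a referent is not in a
// space that a partial (young) collection would scavenge. Narrow oops are
// never expected on the paths this closure walks, hence the
// ShouldNotReachHere() in the narrowOop overload.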
#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
           "Referent should not be scavengable.");
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

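// Walk the strong and weak root sets, applying the supplied closures to
// oops, class-loader data, and compiled code found outside the heap
// proper. The ScanningOption selects which root groups are included.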
void GenCollectedHeap::process_roots(StrongRootsScope* scope,
                                     ScanningOption so,
                                     OopClosure* strong_roots,
                                     OopClosure* weak_roots,
                                     CLDClosure* strong_cld_closure,
                                     CLDClosure* weak_cld_closure,
                                     CodeBlobClosure* code_roots) {
  // ... (body, and the functions that follow it, elided)
}

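// TLAB support. Only the young generation hands out thread-local
// allocation buffers, so the TLAB queries below delegate to it; the
// asserts guard against an old generation that claims TLAB support.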
size_t GenCollectedHeap::tlab_used(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  if (_young_gen->supports_tlab_allocation()) {
    return _young_gen->tlab_used();
  }
  return 0;
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  if (_young_gen->supports_tlab_allocation()) {
    return _young_gen->unsafe_max_tlab_alloc();
  }
  return 0;
}

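// Allocate a new TLAB through the same policy slow path as mem_allocate().
// The overhead-limit flag is required by the interface but ignored here.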
HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  bool gc_overhead_limit_was_exceeded;
  return gen_policy()->mem_allocate_work(size /* size */,
                                         true /* is_tlab */,
                                         &gc_overhead_limit_was_exceeded);
}

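// Scratch blocks form a singly linked free list; num_words records each
// block's size. The helper below keeps a pointer to the link field of the
// current node (a pointer-to-pointer) so the smallest block can be
// unlinked without re-walking the list.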
// Requires "*prev_ptr" to be non-NULL. Deletes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      // Remember the link that points at the current smallest block.
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur      = cur->next;
  }
  // Unlink the smallest block and hand it back to the caller.
  smallest      = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}