10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * version 2 for more details (a copy is included in the LICENSE file that
12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #include "precompiled.hpp"
25 #include "memory/allocation.hpp"
26 #include "gc_implementation/shenandoah/brooksPointer.hpp"
27 #include "gc_implementation/shenandoah/shenandoahHeap.hpp"
28 #include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
29 #include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp"
30 #include "memory/space.inline.hpp"
31 #include "memory/universe.hpp"
32 #include "oops/oop.inline.hpp"
33 #include "runtime/java.hpp"
34 #include "runtime/mutexLocker.hpp"
35 #include "runtime/os.hpp"
36 #include "runtime/safepoint.hpp"
37
// Region geometry and derived thresholds. All of these are computed exactly
// once in setup_heap_region_size() during heap initialization (each setter
// there guards with "we should only set it once") and are immutable afterwards.
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;        // region size, bytes (power of two)
size_t ShenandoahHeapRegion::RegionSizeWords = 0;        // region size, heap words
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;   // log2 of RegionSizeBytes
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;   // log2 of RegionSizeWords
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;    // RegionSizeBytes - 1
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;    // RegionSizeWords - 1
size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0; // objects at/above this are humongous
size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;       // cap on TLAB size, see setup_heap_region_size()
47
// Construct a region covering [start, start + size_words). The region starts
// out empty; "committed" says whether the backing memory is already committed
// by the OS, which selects the initial state and is forwarded to the space
// initialization below.
// NOTE(review): initializer order should match the member declaration order in
// the header (not visible here) -- confirm against shenandoahHeapRegion.hpp.
ShenandoahHeapRegion::ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start,
                                           size_t size_words, size_t index, bool committed) :
  _heap(heap),
  _pacer(ShenandoahPacing ? heap->pacer() : NULL),  // pacer only exists when pacing is enabled
  _region_number(index),
  _live_data(0),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _shared_allocs(0),
  _reserved(MemRegion(start, size_words)),
  _new_top(NULL),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _empty_time(os::elapsedTime()),  // timestamp for uncommit heuristics
  _critical_pins(0) {

  ContiguousSpace::initialize(_reserved, true, committed);
}
65
// Index of this region within the heap's region array.
size_t ShenandoahHeapRegion::region_number() const {
  return _region_number;
}
69
// Fatal-error helper for the make_*() state machine: dumps the offending
// transition (current state + attempted operation) and the region itself,
// then aborts the VM.
void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
  ResourceMark rm;
  stringStream ss;
  ss.print("Illegal region state transition from \"%s\", at %s\n ", region_state_to_string(_state), method);
  print_on(&ss);
  fatal(ss.as_string());
}
77
78 void ShenandoahHeapRegion::make_regular_allocation() {
79 _heap->assert_heaplock_owned_by_current_thread();
80 switch (_state) {
81 case _empty_uncommitted:
172 }
173 }
174
// Transition the region into the matching pinned state. Pins nest:
// _critical_pins counts outstanding pin requests. Must hold the heap lock.
void ShenandoahHeapRegion::make_pinned() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _regular:
      assert (_critical_pins == 0, "sanity");
      _state = _pinned;
      // Intentional fall-through: the first pin also bumps the counter.
    case _pinned_cset:
    case _pinned:
      _critical_pins++;
      return;
    case _humongous_start:
      assert (_critical_pins == 0, "sanity");
      _state = _pinned_humongous_start;
      // Intentional fall-through, as above.
    case _pinned_humongous_start:
      _critical_pins++;
      return;
    case _cset:
      // Pinning a collection-set region is only legal once evacuation has
      // been cancelled; otherwise the region could be evacuated under the pin.
      guarantee(_heap->cancelled_concgc(), "only valid when evac has been cancelled");
      assert (_critical_pins == 0, "sanity");
      _state = _pinned_cset;
      _critical_pins++;
      return;
    default:
      report_illegal_transition("pinning");
  }
}
201
// Drop one pin; when the count reaches zero, return the region to the
// corresponding unpinned state. Must hold the heap lock.
void ShenandoahHeapRegion::make_unpinned() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _pinned:
      assert (_critical_pins > 0, "sanity");
      _critical_pins--;
      if (_critical_pins == 0) {
        _state = _regular;
      }
      return;
    case _regular:
    case _humongous_start:
      // Already unpinned; tolerated, but the pin count must be zero.
      assert (_critical_pins == 0, "sanity");
      return;
    case _pinned_cset:
      // See make_pinned(): pinned-cset only exists after evac cancellation.
      guarantee(_heap->cancelled_concgc(), "only valid when evac has been cancelled");
      assert (_critical_pins > 0, "sanity");
      _critical_pins--;
      if (_critical_pins == 0) {
        _state = _cset;
      }
      return;
    case _pinned_humongous_start:
      assert (_critical_pins > 0, "sanity");
      _critical_pins--;
      if (_critical_pins == 0) {
        _state = _humongous_start;
      }
      return;
    default:
      report_illegal_transition("unpinning");
  }
}
235
236 void ShenandoahHeapRegion::make_cset() {
237 _heap->assert_heaplock_owned_by_current_thread();
323 } else {
324 reset_alloc_metadata();
325 }
326 }
327
// Bytes allocated in this region through the shared (non-TLAB/GCLAB) path.
size_t ShenandoahHeapRegion::get_shared_allocs() const {
  return _shared_allocs * HeapWordSize;
}
331
// Bytes allocated in this region for mutator TLABs.
size_t ShenandoahHeapRegion::get_tlab_allocs() const {
  return _tlab_allocs * HeapWordSize;
}
335
// Bytes allocated in this region for GC worker GCLABs.
size_t ShenandoahHeapRegion::get_gclab_allocs() const {
  return _gclab_allocs * HeapWordSize;
}
339
340 void ShenandoahHeapRegion::set_live_data(size_t s) {
341 assert(Thread::current()->is_VM_thread(), "by VM thread");
342 size_t v = s >> LogHeapWordSize;
343 assert(v < max_jint, "sanity");
344 _live_data = (jint)v;
345 }
346
// Live data in heap words. load_acquire pairs with releasing updates done by
// the marking code; the stored value is never negative (see set_live_data).
size_t ShenandoahHeapRegion::get_live_data_words() const {
  jint v = OrderAccess::load_acquire((volatile jint*)&_live_data);
  assert(v >= 0, "sanity");
  return (size_t)v;
}
352
// Live data in bytes; see get_live_data_words().
size_t ShenandoahHeapRegion::get_live_data_bytes() const {
  return get_live_data_words() * HeapWordSize;
}
356
// True when marking recorded any live data in this region.
bool ShenandoahHeapRegion::has_live() const {
  return get_live_data_words() != 0;
}
360
361 size_t ShenandoahHeapRegion::garbage() const {
362 assert(used() >= get_live_data_bytes(), err_msg("Live Data must be a subset of used() live: "SIZE_FORMAT" used: "SIZE_FORMAT,
363 get_live_data_bytes(), used()));
393 st->print("|HC ");
394 break;
395 case _cset:
396 st->print("|CS ");
397 break;
398 case _trash:
399 st->print("|T ");
400 break;
401 case _pinned:
402 st->print("|P ");
403 break;
404 case _pinned_cset:
405 st->print("|CSP");
406 break;
407 default:
408 ShouldNotReachHere();
409 }
410 st->print("|BTE " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12),
411 p2i(bottom()), p2i(top()), p2i(end()));
412 st->print("|TAMS " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12),
413 p2i(_heap->complete_top_at_mark_start(_bottom)),
414 p2i(_heap->next_top_at_mark_start(_bottom)));
415 st->print("|U %3d%%", (int) ((double) used() * 100 / capacity()));
416 st->print("|T %3d%%", (int) ((double) get_tlab_allocs() * 100 / capacity()));
417 st->print("|G %3d%%", (int) ((double) get_gclab_allocs() * 100 / capacity()));
418 st->print("|S %3d%%", (int) ((double) get_shared_allocs() * 100 / capacity()));
419 st->print("|L %3d%%", (int) ((double) get_live_data_bytes() * 100 / capacity()));
420 st->print("|CP " SIZE_FORMAT_W(3), _critical_pins);
421
422 st->cr();
423 }
424
425
426 class ShenandoahSkipUnreachableObjectToOopClosure: public ObjectClosure {
427 ExtendedOopClosure* _cl;
428 bool _skip_unreachable_objects;
429 ShenandoahHeap* _heap;
430
431 public:
432 ShenandoahSkipUnreachableObjectToOopClosure(ExtendedOopClosure* cl, bool skip_unreachable_objects) :
433 _cl(cl), _skip_unreachable_objects(skip_unreachable_objects), _heap(ShenandoahHeap::heap()) {}
434
435 void do_object(oop obj) {
436
437 if ((! _skip_unreachable_objects) || _heap->is_marked_complete(obj)) {
438 #ifdef ASSERT
439 if (_skip_unreachable_objects) {
440 assert(_heap->is_marked_complete(obj), "obj must be live");
441 }
442 #endif
443 obj->oop_iterate(_cl);
444 }
445
446 }
447 };
448
// Fill the unused tail of the region with a dummy object so the region stays
// parseable. Only done when there is room for a filler plus its Brooks
// (forwarding) pointer word.
void ShenandoahHeapRegion::fill_region() {
  if (free() > (BrooksPointer::word_size() + CollectedHeap::min_fill_size())) {
    // The returned address is intentionally unused: this allocation only
    // reserves the Brooks-pointer slot in front of the filler object.
    HeapWord* filler = allocate(BrooksPointer::word_size(), ShenandoahHeap::_alloc_shared);
    HeapWord* obj = allocate(end() - top(), ShenandoahHeap::_alloc_shared);
    _heap->fill_with_object(obj, end() - obj);
    BrooksPointer::initialize(oop(obj));
  }
}
457
458 ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
459 assert(is_humongous(), "Must be a part of the humongous region");
460 size_t reg_num = region_number();
461 ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
462 while (!r->is_humongous_start()) {
463 assert(reg_num > 0, "Sanity");
464 reg_num --;
465 r = _heap->get_region(reg_num);
466 assert(r->is_humongous(), "Must be a part of the humongous region");
467 }
468 assert(r->is_humongous_start(), "Must be");
469 return r;
470 }
471
// Recycle a trashed region back into the empty state: clear the underlying
// space, reset allocation/liveness bookkeeping, and reset the complete
// top-at-mark-start pointer.
void ShenandoahHeapRegion::recycle() {
  ContiguousSpace::clear(false);
  if (ZapUnusedHeapArea) {
    ContiguousSpace::mangle_unused_area_complete();
  }
  clear_live_data();
  reset_alloc_metadata();
  // Reset C-TAMS pointer to ensure size-based iteration, everything
  // in that regions is going to be new objects.
  _heap->set_complete_top_at_mark_start(bottom(), bottom());
  // We can only safely reset the C-TAMS pointer if the bitmap is clear for that region.
  assert(_heap->is_complete_bitmap_clear_range(bottom(), end()), "must be clear");

  make_empty();
}
487
// Find the start of the block containing p by linearly walking objects from
// the bottom of the region. Each object is preceded by a Brooks-pointer word,
// so the step is object size plus that word.
HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p ("PTR_FORMAT") not in space ["PTR_FORMAT", "PTR_FORMAT")",
                 p2i(p), p2i(bottom()), p2i(end())));
  if (p >= top()) {
    // Above top there are no objects; answer top itself.
    return top();
  } else {
    // First object starts one Brooks-pointer word above bottom.
    HeapWord* last = bottom() + BrooksPointer::word_size();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size() + BrooksPointer::word_size();
    }
    assert(oop(last)->is_oop(),
           err_msg(PTR_FORMAT" should be an object start", p2i(last)));
    return last;
  }
}
506
// Validate region-sizing flags and compute the (immutable) region geometry
// globals: region size/shift/mask, humongous thresholds, and max TLAB size.
// Called once during heap initialization; all invalid flag combinations exit
// the VM with a diagnostic message.
void ShenandoahHeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  // Absolute minimums we should not ever break:
  static const size_t MIN_REGION_SIZE = 256*K;
  static const size_t MIN_NUM_REGIONS = 10;

  uintx region_size;
  if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) {
    // Region size not set explicitly: derive it from heap size, then clamp
    // into [ShenandoahMinRegionSize, ShenandoahMaxRegionSize].
    if (ShenandoahMinRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "K).",
                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahMinRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "K should not be lower than minimum region size (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K,  MIN_REGION_SIZE/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("" SIZE_FORMAT "K should not be lower than TLAB size size (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K,  MinTLABSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "K should not be lower than min region size (" SIZE_FORMAT "K).",
                      ShenandoahMaxRegionSize/K,  MIN_REGION_SIZE/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (" SIZE_FORMAT "K) should be larger than maximum (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K, ShenandoahMaxRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }
    // Aim for ShenandoahTargetNumRegions regions of the average heap size.
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / ShenandoahTargetNumRegions,
                       ShenandoahMinRegionSize);

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2(ShenandoahMinRegionSize, region_size);
    region_size = MIN2(ShenandoahMaxRegionSize, region_size);

  } else {
    // Region size set explicitly: validate it against heap size and limits.
    if (ShenandoahHeapRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "K).",
                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahHeapRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize < ShenandoahMinRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "K) should be larger than min region size (" SIZE_FORMAT "K).",
                      ShenandoahHeapRegionSize/K, ShenandoahMinRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "K) should be lower than max region size (" SIZE_FORMAT "K).",
                      ShenandoahHeapRegionSize/K, ShenandoahMaxRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    region_size = ShenandoahHeapRegionSize;
  }

  if (1 > ShenandoahHumongousThreshold || ShenandoahHumongousThreshold > 100) {
    vm_exit_during_initialization("Invalid -XX:ShenandoahHumongousThreshold option, should be within [1..100]");
  }

  // Make sure region size is at least one large page, if enabled.
  // Otherwise, mem-protecting one region may falsely protect the adjacent
  // regions too.
  if (UseLargePages) {
    region_size = MAX2(region_size, os::large_page_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now, set up the globals.
  guarantee(RegionSizeBytesShift == 0, "we should only set it once");
  RegionSizeBytesShift = (size_t)region_size_log;

  guarantee(RegionSizeWordsShift == 0, "we should only set it once");
  RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = (size_t)region_size;
  RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
  assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity");

  guarantee(RegionSizeWordsMask == 0, "we should only set it once");
  RegionSizeWordsMask = RegionSizeWords - 1;

  guarantee(RegionSizeBytesMask == 0, "we should only set it once");
  RegionSizeBytesMask = RegionSizeBytes - 1;

  guarantee(HumongousThresholdWords == 0, "we should only set it once");
  HumongousThresholdWords = RegionSizeWords * ShenandoahHumongousThreshold / 100;
  assert (HumongousThresholdWords <= RegionSizeWords, "sanity");

  guarantee(HumongousThresholdBytes == 0, "we should only set it once");
  HumongousThresholdBytes = HumongousThresholdWords * HeapWordSize;
  assert (HumongousThresholdBytes <= RegionSizeBytes, "sanity");

  // The rationale for trimming the TLAB sizes has to do with the raciness in
  // TLAB allocation machinery. It may happen that TLAB sizing policy polls Shenandoah
  // about next free size, gets the answer for region #N, goes away for a while, then
  // tries to allocate in region #N, and fail because some other thread have claimed part
  // of the region #N, and then the freeset allocation code has to retire the region #N,
  // before moving the allocation to region #N+1.
  //
  // The worst case realizes when "answer" is "region size", which means it could
  // prematurely retire an entire region. Having smaller TLABs does not fix that
  // completely, but reduces the probability of too wasteful region retirement.
  // With current divisor, we will waste no more than 1/8 of region size in the worst
  // case. This also has a secondary effect on collection set selection: even under
  // the race, the regions would be at least 7/8 used, which allows relying on
  // "used" - "live" for cset selection. Otherwise, we can get the fragmented region
  // below the garbage threshold that would never be considered for collection.
  guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
  MaxTLABSizeBytes = MIN2(RegionSizeBytes / 8, HumongousThresholdBytes);
  assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");

  log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", RegionSizeBytes / M);
  log_info(gc, init)("Region size in bytes: "SIZE_FORMAT, RegionSizeBytes);
  log_info(gc, init)("Region size byte shift: "SIZE_FORMAT, RegionSizeBytesShift);
  log_info(gc, init)("Humongous threshold in bytes: "SIZE_FORMAT, HumongousThresholdBytes);
  log_info(gc, init)("Max TLAB size in bytes: "SIZE_FORMAT, MaxTLABSizeBytes);
  log_info(gc, init)("Number of regions: "SIZE_FORMAT, max_heap_size / RegionSizeBytes);
}
637
// Commit the region's backing memory and its marking-bitmap slice with the
// OS, and account the committed bytes. Commit failures are surfaced as Java
// OOM rather than an immediate VM abort.
void ShenandoahHeapRegion::do_commit() {
  if (!os::commit_memory((char *) _reserved.start(), _reserved.byte_size(), false)) {
    report_java_out_of_memory("Unable to commit region");
  }
  if (!_heap->commit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to commit bitmaps for region");
  }
  _heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
}
647
648 void ShenandoahHeapRegion::do_uncommit() {
649 if (!os::uncommit_memory((char *) _reserved.start(), _reserved.byte_size())) {
650 report_java_out_of_memory("Unable to uncommit region");
651 }
652 if (!_heap->uncommit_bitmap_slice(this)) {
653 report_java_out_of_memory("Unable to uncommit bitmaps for region");
654 }
655 _heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes());
|
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * version 2 for more details (a copy is included in the LICENSE file that
12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #include "precompiled.hpp"
25 #include "memory/allocation.hpp"
26 #include "gc_implementation/shenandoah/brooksPointer.hpp"
27 #include "gc_implementation/shenandoah/shenandoahHeap.hpp"
28 #include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
29 #include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp"
30 #include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp"
31 #include "memory/space.inline.hpp"
32 #include "memory/universe.hpp"
33 #include "oops/oop.inline.hpp"
34 #include "runtime/java.hpp"
35 #include "runtime/mutexLocker.hpp"
36 #include "runtime/os.hpp"
37 #include "runtime/safepoint.hpp"
38
// Region geometry and derived thresholds. Computed once during heap
// initialization (each setter guards with "we should only set it once")
// and immutable afterwards.
size_t ShenandoahHeapRegion::RegionCount = 0;             // number of regions in the heap
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;         // region size, bytes (power of two)
size_t ShenandoahHeapRegion::RegionSizeWords = 0;         // region size, heap words
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;    // log2 of RegionSizeBytes
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;    // log2 of RegionSizeWords
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;     // RegionSizeBytes - 1
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;     // RegionSizeWords - 1
size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0; // objects at/above this are humongous
size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;        // cap on TLAB size, bytes
size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;        // cap on TLAB size, words
50
// Construct a region covering [start, start + size_words). The region starts
// out empty; "committed" says whether the backing memory is already committed
// by the OS, which selects the initial state and is forwarded to the space
// initialization below.
ShenandoahHeapRegion::ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start,
                                           size_t size_words, size_t index, bool committed) :
  _heap(heap),
  _region_number(index),
  _live_data(0),
  _reserved(MemRegion(start, size_words)),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _shared_allocs(0),
  _new_top(NULL),
  _critical_pins(0),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _empty_time(os::elapsedTime()),  // timestamp for uncommit heuristics
  _pacer(ShenandoahPacing ? heap->pacer() : NULL) {  // pacer only exists when pacing is on

  ContiguousSpace::initialize(_reserved, true, committed);
}
68
// Index of this region within the heap's region array.
size_t ShenandoahHeapRegion::region_number() const {
  return _region_number;
}
72
// Fatal-error helper for the make_*() state machine: dumps the offending
// transition (current state + attempted operation) and the region itself,
// then aborts the VM.
void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
  ResourceMark rm;
  stringStream ss;
  ss.print("Illegal region state transition from \"%s\", at %s\n ", region_state_to_string(_state), method);
  print_on(&ss);
  fatal(ss.as_string());
}
80
81 void ShenandoahHeapRegion::make_regular_allocation() {
82 _heap->assert_heaplock_owned_by_current_thread();
83 switch (_state) {
84 case _empty_uncommitted:
175 }
176 }
177
// Transition the region into the matching pinned state. Pins nest:
// _critical_pins counts outstanding pin requests. Must hold the heap lock.
void ShenandoahHeapRegion::make_pinned() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _regular:
      assert (_critical_pins == 0, "sanity");
      _state = _pinned;
      // Intentional fall-through: the first pin also bumps the counter.
    case _pinned_cset:
    case _pinned:
      _critical_pins++;
      return;
    case _humongous_start:
      assert (_critical_pins == 0, "sanity");
      _state = _pinned_humongous_start;
      // Intentional fall-through, as above.
    case _pinned_humongous_start:
      _critical_pins++;
      return;
    case _cset:
      // Pinning a collection-set region is only legal once the GC cycle has
      // been cancelled; otherwise the region could be evacuated under the pin.
      guarantee(_heap->cancelled_gc(), "only valid when evac has been cancelled");
      assert (_critical_pins == 0, "sanity");
      _state = _pinned_cset;
      _critical_pins++;
      return;
    default:
      report_illegal_transition("pinning");
  }
}
204
// Drop one pin; when the count reaches zero, return the region to the
// corresponding unpinned state. Must hold the heap lock.
void ShenandoahHeapRegion::make_unpinned() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _pinned:
      assert (_critical_pins > 0, "sanity");
      _critical_pins--;
      if (_critical_pins == 0) {
        _state = _regular;
      }
      return;
    case _regular:
    case _humongous_start:
      // Already unpinned; tolerated, but the pin count must be zero.
      assert (_critical_pins == 0, "sanity");
      return;
    case _pinned_cset:
      // See make_pinned(): pinned-cset only exists after GC cancellation.
      guarantee(_heap->cancelled_gc(), "only valid when evac has been cancelled");
      assert (_critical_pins > 0, "sanity");
      _critical_pins--;
      if (_critical_pins == 0) {
        _state = _cset;
      }
      return;
    case _pinned_humongous_start:
      assert (_critical_pins > 0, "sanity");
      _critical_pins--;
      if (_critical_pins == 0) {
        _state = _humongous_start;
      }
      return;
    default:
      report_illegal_transition("unpinning");
  }
}
238
239 void ShenandoahHeapRegion::make_cset() {
240 _heap->assert_heaplock_owned_by_current_thread();
326 } else {
327 reset_alloc_metadata();
328 }
329 }
330
// Bytes allocated in this region through the shared (non-TLAB/GCLAB) path.
size_t ShenandoahHeapRegion::get_shared_allocs() const {
  return _shared_allocs * HeapWordSize;
}
334
// Bytes allocated in this region for mutator TLABs.
size_t ShenandoahHeapRegion::get_tlab_allocs() const {
  return _tlab_allocs * HeapWordSize;
}
338
// Bytes allocated in this region for GC worker GCLABs.
size_t ShenandoahHeapRegion::get_gclab_allocs() const {
  return _gclab_allocs * HeapWordSize;
}
342
// Set the amount of live data (in bytes) for this region. Non-atomic update,
// therefore restricted to the VM thread at a safepoint.
void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  size_t v = s >> LogHeapWordSize;
  // Cast keeps the comparison unsigned/unsigned; the value must fit in jint.
  assert(v < (size_t)max_jint, "sanity");
  _live_data = (jint)v;
}
349
// Live data in heap words. load_acquire pairs with releasing updates done by
// the marking code; the stored value is never negative (see set_live_data).
size_t ShenandoahHeapRegion::get_live_data_words() const {
  jint v = OrderAccess::load_acquire((volatile jint*)&_live_data);
  assert(v >= 0, "sanity");
  return (size_t)v;
}
355
// Live data in bytes; see get_live_data_words().
size_t ShenandoahHeapRegion::get_live_data_bytes() const {
  return get_live_data_words() * HeapWordSize;
}
359
// True when marking recorded any live data in this region.
bool ShenandoahHeapRegion::has_live() const {
  return get_live_data_words() != 0;
}
363
364 size_t ShenandoahHeapRegion::garbage() const {
365 assert(used() >= get_live_data_bytes(), err_msg("Live Data must be a subset of used() live: "SIZE_FORMAT" used: "SIZE_FORMAT,
366 get_live_data_bytes(), used()));
396 st->print("|HC ");
397 break;
398 case _cset:
399 st->print("|CS ");
400 break;
401 case _trash:
402 st->print("|T ");
403 break;
404 case _pinned:
405 st->print("|P ");
406 break;
407 case _pinned_cset:
408 st->print("|CSP");
409 break;
410 default:
411 ShouldNotReachHere();
412 }
413 st->print("|BTE " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12),
414 p2i(bottom()), p2i(top()), p2i(end()));
415 st->print("|TAMS " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12),
416 p2i(_heap->complete_marking_context()->top_at_mark_start(region_number())),
417 p2i(_heap->next_marking_context()->top_at_mark_start(region_number())));
418 st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
419 st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()), proper_unit_for_byte_size(get_tlab_allocs()));
420 st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()), proper_unit_for_byte_size(get_gclab_allocs()));
421 st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()), proper_unit_for_byte_size(get_shared_allocs()));
422 st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
423 st->print("|CP " SIZE_FORMAT_W(3), _critical_pins);
424
425 st->cr();
426 }
427
// Fill the unused tail of the region with a dummy object so the region stays
// parseable. Only done when there is room for a filler plus its Brooks
// (forwarding) pointer word.
void ShenandoahHeapRegion::fill_region() {
  if (free() > (BrooksPointer::word_size() + CollectedHeap::min_fill_size())) {
    // The returned address is intentionally unused: this allocation only
    // reserves the Brooks-pointer slot in front of the filler object.
    HeapWord* filler = allocate(BrooksPointer::word_size(), ShenandoahHeap::_alloc_shared);
    HeapWord* obj = allocate(end() - top(), ShenandoahHeap::_alloc_shared);
    _heap->fill_with_object(obj, end() - obj);
    BrooksPointer::initialize(oop(obj));
  }
}
436
// Walk backwards from this (humongous) region to the region that starts the
// humongous object it belongs to. Every region visited on the way must be a
// humongous continuation.
ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
  assert(is_humongous(), "Must be a part of the humongous region");
  size_t reg_num = region_number();
  ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
  while (!r->is_humongous_start()) {
    assert(reg_num > 0, "Sanity");
    reg_num --;
    r = _heap->get_region(reg_num);
    assert(r->is_humongous(), "Must be a part of the humongous region");
  }
  assert(r->is_humongous_start(), "Must be");
  return r;
}
450
// Recycle a trashed region back into the empty state: clear the underlying
// space, reset allocation/liveness bookkeeping, and reset the complete
// top-at-mark-start pointer via the marking context.
void ShenandoahHeapRegion::recycle() {
  ContiguousSpace::clear(false);
  if (ZapUnusedHeapArea) {
    ContiguousSpace::mangle_unused_area_complete();
  }
  clear_live_data();
  reset_alloc_metadata();

  ShenandoahMarkingContext* const compl_ctx = _heap->complete_marking_context();

  // Reset C-TAMS pointer to ensure size-based iteration, everything
  // in that regions is going to be new objects.
  compl_ctx->set_top_at_mark_start(region_number(), bottom());
  // We can only safely reset the C-TAMS pointer if the bitmap is clear for that region.
  assert(compl_ctx->is_bitmap_clear_range(bottom(), end()), "must be clear");

  make_empty();
}
469
// Find the start of the block containing p by linearly walking objects from
// the bottom of the region. Each object is preceded by a Brooks-pointer word,
// so the step is object size plus that word.
HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p ("PTR_FORMAT") not in space ["PTR_FORMAT", "PTR_FORMAT")",
                 p2i(p), p2i(bottom()), p2i(end())));
  if (p >= top()) {
    // Above top there are no objects; answer top itself.
    return top();
  } else {
    // First object starts one Brooks-pointer word above bottom.
    HeapWord* last = bottom() + BrooksPointer::word_size();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size() + BrooksPointer::word_size();
    }
    shenandoah_assert_correct(NULL, oop(last));
    return last;
  }
}
487
488 void ShenandoahHeapRegion::setup_sizes(size_t initial_heap_size, size_t max_heap_size) {
489 // Absolute minimums we should not ever break:
490 static const size_t MIN_REGION_SIZE = 256*K;
491 static const size_t MIN_NUM_REGIONS = 10;
492
493 size_t region_size;
494 if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) {
495 if (ShenandoahMinRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
496 err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
497 "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "K).",
498 initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahMinRegionSize/K);
499 vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
500 }
501 if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
502 err_msg message("" SIZE_FORMAT "K should not be lower than minimum region size (" SIZE_FORMAT "K).",
503 ShenandoahMinRegionSize/K, MIN_REGION_SIZE/K);
504 vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
505 }
506 if (ShenandoahMinRegionSize < MinTLABSize) {
507 err_msg message("" SIZE_FORMAT "K should not be lower than TLAB size size (" SIZE_FORMAT "K).",
508 ShenandoahMinRegionSize/K, MinTLABSize/K);
509 vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
510 }
511 if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
512 err_msg message("" SIZE_FORMAT "K should not be lower than min region size (" SIZE_FORMAT "K).",
513 ShenandoahMaxRegionSize/K, MIN_REGION_SIZE/K);
514 vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
515 }
516 if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
517 err_msg message("Minimum (" SIZE_FORMAT "K) should be larger than maximum (" SIZE_FORMAT "K).",
518 ShenandoahMinRegionSize/K, ShenandoahMaxRegionSize/K);
519 vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
520 }
521
522 // We rapidly expand to max_heap_size in most scenarios, so that is the measure
523 // for usual heap sizes. Do not depend on initial_heap_size here.
524 region_size = max_heap_size / ShenandoahTargetNumRegions;
525
526 // Now make sure that we don't go over or under our limits.
527 region_size = MAX2(ShenandoahMinRegionSize, region_size);
528 region_size = MIN2(ShenandoahMaxRegionSize, region_size);
529
530 } else {
531 if (ShenandoahHeapRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
532 err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
533 "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "K).",
534 initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahHeapRegionSize/K);
535 vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
536 }
537 if (ShenandoahHeapRegionSize < ShenandoahMinRegionSize) {
538 err_msg message("Heap region size (" SIZE_FORMAT "K) should be larger than min region size (" SIZE_FORMAT "K).",
539 ShenandoahHeapRegionSize/K, ShenandoahMinRegionSize/K);
540 vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
541 }
542 if (ShenandoahHeapRegionSize > ShenandoahMaxRegionSize) {
543 err_msg message("Heap region size (" SIZE_FORMAT "K) should be lower than max region size (" SIZE_FORMAT "K).",
544 ShenandoahHeapRegionSize/K, ShenandoahMaxRegionSize/K);
545 vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
546 }
547 region_size = ShenandoahHeapRegionSize;
548 }
549
550 if (1 > ShenandoahHumongousThreshold || ShenandoahHumongousThreshold > 100) {
551 vm_exit_during_initialization("Invalid -XX:ShenandoahHumongousThreshold option, should be within [1..100]");
552 }
553
554 // Make sure region size is at least one large page, if enabled.
555 // Otherwise, mem-protecting one region may falsely protect the adjacent
556 // regions too.
557 if (UseLargePages) {
558 region_size = MAX2(region_size, os::large_page_size());
559 }
560
561 int region_size_log = log2_long((jlong) region_size);
562 // Recalculate the region size to make sure it's a power of
563 // 2. This means that region_size is the largest power of 2 that's
564 // <= what we've calculated so far.
565 region_size = (size_t(1) << region_size_log);
566
567 // Now, set up the globals.
568 guarantee(RegionSizeBytesShift == 0, "we should only set it once");
569 RegionSizeBytesShift = (size_t)region_size_log;
570
571 guarantee(RegionSizeWordsShift == 0, "we should only set it once");
572 RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;
573
574 guarantee(RegionSizeBytes == 0, "we should only set it once");
575 RegionSizeBytes = region_size;
576 RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
577 assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity");
578
579 guarantee(RegionSizeWordsMask == 0, "we should only set it once");
580 RegionSizeWordsMask = RegionSizeWords - 1;
581
582 guarantee(RegionSizeBytesMask == 0, "we should only set it once");
583 RegionSizeBytesMask = RegionSizeBytes - 1;
584
585 guarantee(RegionCount == 0, "we should only set it once");
586 RegionCount = max_heap_size / RegionSizeBytes;
587
588 guarantee(HumongousThresholdWords == 0, "we should only set it once");
589 HumongousThresholdWords = RegionSizeWords * ShenandoahHumongousThreshold / 100;
590 assert (HumongousThresholdWords <= RegionSizeWords, "sanity");
591
592 guarantee(HumongousThresholdBytes == 0, "we should only set it once");
593 HumongousThresholdBytes = HumongousThresholdWords * HeapWordSize;
594 assert (HumongousThresholdBytes <= RegionSizeBytes, "sanity");
595
596 // The rationale for trimming the TLAB sizes has to do with the raciness in
597 // TLAB allocation machinery. It may happen that TLAB sizing policy polls Shenandoah
598 // about next free size, gets the answer for region #N, goes away for a while, then
// tries to allocate in region #N, and fails because some other thread has claimed part
600 // of the region #N, and then the freeset allocation code has to retire the region #N,
601 // before moving the allocation to region #N+1.
602 //
// The worst case is realized when the "answer" is "region size", which means it could
604 // prematurely retire an entire region. Having smaller TLABs does not fix that
605 // completely, but reduces the probability of too wasteful region retirement.
606 // With current divisor, we will waste no more than 1/8 of region size in the worst
607 // case. This also has a secondary effect on collection set selection: even under
608 // the race, the regions would be at least 7/8 used, which allows relying on
609 // "used" - "live" for cset selection. Otherwise, we can get the fragmented region
610 // below the garbage threshold that would never be considered for collection.
611 //
612 // The whole thing would be mitigated if Elastic TLABs were enabled, but there
613 // is no support in this JDK.
614 //
615 guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
616 MaxTLABSizeBytes = MIN2(RegionSizeBytes / 8, HumongousThresholdBytes);
617 assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");
618
619 guarantee(MaxTLABSizeWords == 0, "we should only set it once");
620 MaxTLABSizeWords = MaxTLABSizeBytes / HeapWordSize;
621
622 log_info(gc, init)("Regions: " SIZE_FORMAT " x " SIZE_FORMAT "%s",
623 RegionCount, byte_size_in_proper_unit(RegionSizeBytes), proper_unit_for_byte_size(RegionSizeBytes));
624 log_info(gc, init)("Humongous object threshold: " SIZE_FORMAT "%s",
625 byte_size_in_proper_unit(HumongousThresholdBytes), proper_unit_for_byte_size(HumongousThresholdBytes));
626 log_info(gc, init)("Max TLAB size: " SIZE_FORMAT "%s",
627 byte_size_in_proper_unit(MaxTLABSizeBytes), proper_unit_for_byte_size(MaxTLABSizeBytes));
628 }
629
630 void ShenandoahHeapRegion::do_commit() {
631 if (!os::commit_memory((char *) _reserved.start(), _reserved.byte_size(), false)) {
632 report_java_out_of_memory("Unable to commit region");
633 }
634 if (!_heap->commit_bitmap_slice(this)) {
635 report_java_out_of_memory("Unable to commit bitmaps for region");
636 }
637 _heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
638 }
639
640 void ShenandoahHeapRegion::do_uncommit() {
641 if (!os::uncommit_memory((char *) _reserved.start(), _reserved.byte_size())) {
642 report_java_out_of_memory("Unable to uncommit region");
643 }
644 if (!_heap->uncommit_bitmap_slice(this)) {
645 report_java_out_of_memory("Unable to uncommit bitmaps for region");
646 }
647 _heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes());
|