class RefineCardTableEntryClosure;

class G1RegionMappingChangedListener : public G1MappingChangedListener {
 private:
  void reset_from_card_cache(uint start_idx, size_t num_regions);
 public:
  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
};

class G1CollectedHeap : public CollectedHeap {
  friend class VM_CollectForMetadataAllocation;
  friend class VM_G1CollectForAllocation;
  friend class VM_G1CollectFull;
  friend class VM_G1IncCollectionPause;
  friend class VMStructs;
  friend class MutatorAllocRegion;
  friend class SurvivorGCAllocRegion;
  friend class OldGCAllocRegion;
  friend class G1Allocator;
  friend class G1RecordingAllocator;

  // Closures used in implementation.
  friend class G1ParScanThreadState;
  friend class G1ParTask;
  friend class G1ParGCAllocator;
  friend class G1PrepareCompactClosure;

  // Other related classes.
  friend class HeapRegionClaimer;

  // Testing classes.
  friend class G1CheckCSetFastTableClosure;

 private:
  FlexibleWorkGang* _workers;

  static size_t _humongous_object_threshold_in_words;

  // The secondary free list which contains regions that have been
  // freed up during the cleanup process. This will be appended to
  // ...
  // before heap shrinking (free_list_only == true).
  void tear_down_region_sets(bool free_list_only);

  // Rebuilds the region sets / lists so that they are repopulated to
  // reflect the contents of the heap. The only exception is the
  // humongous set which was not torn down in the first place. If
  // free_list_only is true, it will only rebuild the master free
  // list. It is called after a Full GC (free_list_only == false) or
  // after heap shrinking (free_list_only == true).
  void rebuild_region_sets(bool free_list_only);

  // Callback for region mapping changed events.
  G1RegionMappingChangedListener _listener;

  // The sequence of all heap regions in the heap.
  HeapRegionManager _hrm;

  // Class that handles the different kinds of allocations.
  G1Allocator* _allocator;

  // Class that handles recording allocation ranges.
  G1RecordingAllocator* _recording_allocator;

  // Statistics for each allocation context.
  AllocationContextStats _allocation_context_stats;

  // PLAB sizing policy for survivors.
  PLABStats _survivor_plab_stats;

  // PLAB sizing policy for tenured objects.
  PLABStats _old_plab_stats;

  // It specifies whether we should attempt to expand the heap after a
  // region allocation failure. If heap expansion fails we set this to
  // false so that we don't re-attempt the heap expansion (it's likely
  // that subsequent expansion attempts will also fail if one fails).
  // Currently, it is only consulted during GC and it's reset at the
  // start of each GC.
  bool _expand_heap_after_alloc_failure;

  // It resets the mutator alloc region before new allocations can take place.
  void init_mutator_alloc_region();

  // ...
  // list later). The used bytes of freed regions are accumulated in
  // pre_used. If par is true, the region's RSet will not be freed
  // up. The assumption is that this will be done later.
  // The locked parameter indicates if the caller has already taken
  // care of proper synchronization. This may allow some optimizations.
  void free_region(HeapRegion* hr,
                   FreeRegionList* free_list,
                   bool par,
                   bool locked = false);

  // Frees a humongous region by collapsing it into individual regions
  // and calling free_region() for each of them. The freed regions
  // will be added to the free list that's passed as a parameter (this
  // is usually a local list which will be appended to the master free
  // list later). The used bytes of freed regions are accumulated in
  // pre_used. If par is true, the region's RSet will not be freed
  // up. The assumption is that this will be done later.
  void free_humongous_region(HeapRegion* hr,
                             FreeRegionList* free_list,
                             bool par);
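  //
  // Caller-side sketch (illustrative only; the batching pattern is the
  // point, the exact lock and list names are assumptions): freed regions
  // are collected on a local list and appended to the master free list
  // once, to avoid repeated lock traffic.
  //
  //   FreeRegionList local_free_list("Local Free List (sketch)");
  //   free_region(hr, &local_free_list, true /* par */);  // RSet freed later
  //   ...
  //   MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
  //   _hrm.insert_list_into_free_list(&local_free_list);  // append to master
  //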

  // Facility for allocating high heap memory from the VM thread, and
  // recording the allocated ranges. The end_ call optionally aligns the
  // end address and returns the allocated ranges as an ascending array
  // of MemRegions. This can be used to create and archive a heap
  // region which can be mapped at the same fixed addresses in a future
  // JVM instance.
  void begin_record_alloc_range();
  void end_record_alloc_range(GrowableArray<MemRegion>* ranges,
                              uint end_alignment = 0);
  bool is_record_alloc_too_large(size_t word_size);
  HeapWord* record_mem_allocate(size_t word_size);
  HeapRegion* alloc_highest_available_region();
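  //
  // Expected call protocol, as a hedged sketch (the driving loop and its
  // helpers are hypothetical; only the five methods above are real):
  //
  //   begin_record_alloc_range();
  //   while (has_more_archive_objects()) {             // hypothetical loop
  //     size_t word_size = next_object_size();         // hypothetical
  //     assert(!is_record_alloc_too_large(word_size), "sanity");
  //     HeapWord* p = record_mem_allocate(word_size);  // tracked allocation
  //     ...
  //   }
  //   ResourceMark rm;
  //   GrowableArray<MemRegion> ranges(2);
  //   end_record_alloc_range(&ranges, 0 /* no extra end alignment */);
  //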

  // Facility for allocating a fixed range within the heap and marking
  // the containing regions as 'archive'. For use at JVM init time, when the
  // caller may mmap archived heap data at the specified range(s). The check_
  // call verifies that the regions are within the reserved heap. The alloc_
  // call commits the appropriate regions and marks them as 'archive', after
  // which the caller can perform the mmap. The fill_ call (which must occur
  // after class loading) inserts any required filler objects around the
  // specified ranges to make the regions parseable.
  bool check_archive_addresses(MemRegion* range, uint count);
  bool alloc_archive_regions(MemRegion* range, uint count);
  void fill_archive_regions(MemRegion* range, uint count);
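  //
  // Init-time protocol sketch (illustrative; the mmap step stands in for
  // whatever mechanism actually maps the archived data):
  //
  //   if (check_archive_addresses(ranges, count) &&   // within reserved heap?
  //       alloc_archive_regions(ranges, count)) {     // commit + mark 'archive'
  //     map_archived_heap_data(ranges, count);        // hypothetical mmap step
  //     ...                                           // load classes
  //     fill_archive_regions(ranges, count);          // make regions parseable
  //   }
  //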

  // Fill the requested space without creating any humongous objects.
  static void fill_with_non_humongous_objects(HeapWord* base_address, size_t word_size);
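  //
  // A plausible implementation sketch (an assumption, not necessarily the
  // actual one, and it ignores the minimum filler-object size): carve the
  // space into filler chunks that each stay below the humongous threshold,
  // so no filler itself becomes humongous.
  //
  //   HeapWord* p = base_address;
  //   size_t remaining = word_size;
  //   while (is_humongous(remaining)) {
  //     size_t chunk = _humongous_object_threshold_in_words;
  //     CollectedHeap::fill_with_object(p, chunk);
  //     p += chunk;
  //     remaining -= chunk;
  //   }
  //   CollectedHeap::fill_with_object(p, remaining);
  //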

 protected:

  // Shrink the garbage-first heap by at most the given size (in bytes!).
  // (Rounds down to a HeapRegion boundary.)
  virtual void shrink(size_t expand_bytes);
  void shrink_helper(size_t expand_bytes);

#if TASKQUEUE_STATS
  static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
  void reset_taskqueue_stats();
#endif // TASKQUEUE_STATS

  // Schedule the VM operation that will do an evacuation pause to
  // satisfy an allocation request of word_size. *succeeded will
  // return whether the VM operation was successful (it did do an
  // evacuation pause) or not (another thread beat us to it or the GC
  // locker was active). Given that we should not be holding the
  // Heap_lock when we enter this method, we will pass the
  // gc_count_before (i.e., total_collections()) as a parameter since
  // ...

  virtual bool is_scavengable(const void* addr);

  // We don't need barriers for initializing stores to objects
  // in the young gen: for the SATB pre-barrier, there is no
  // pre-value that needs to be remembered; for the remembered-set
  // update logging post-barrier, we don't maintain remembered set
  // information for young gen objects.
  virtual inline bool can_elide_initializing_store_barrier(oop new_obj);
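  //
  // Given the reasoning above, a minimal sketch of the predicate (an
  // assumption about the definition, which would live in the .inline.hpp):
  //
  //   inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  //     return is_in_young(new_obj);  // young-gen stores need neither barrier
  //   }
  //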

  // Returns "true" iff the given word_size is "very large".
  static bool is_humongous(size_t word_size) {
    // Note this has to be strictly greater-than as the TLABs
    // are capped at the humongous threshold and we want to
    // ensure that we don't try to allocate a TLAB as
    // humongous and that we don't allocate a humongous
    // object in a TLAB.
    return word_size > _humongous_object_threshold_in_words;
  }

  // Returns the humongous threshold for a specific region size.
  static size_t humongous_threshold_for(size_t region_size) {
    return (region_size / 2);
  }
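  //
  // Worked example (numbers are illustrative): with a 1 MB region on a
  // 64-bit VM, humongous_threshold_for(131072 /* words */) == 65536 words,
  // i.e. half a region. A 65536-word allocation is NOT humongous (the
  // comparison is strictly greater-than, so a maximally-sized TLAB still
  // fits), while 65537 words IS humongous.
  //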

  // Update mod union table with the set of dirty cards.
  void updateModUnion();

  // Set the mod union bits corresponding to the given memRegion. Note
  // that this is always a safe operation, since it doesn't clear any
  // bits.
  void markModUnionRange(MemRegion mr);

  // Records the fact that a marking phase is no longer in progress.
  void set_marking_complete() {
    _mark_in_progress = false;
  }
  void set_marking_started() {
    _mark_in_progress = true;
  }
  bool mark_in_progress() {
    return _mark_in_progress;
  }

  // Print the maximum heap capacity.
  // ...
  }

  bool check_young_list_empty(bool check_heap,
                              bool check_sample = true);

  // *** Stuff related to concurrent marking. It's not clear to me that so
  // many of these need to be public.

  // The functions below are helper functions that a subclass of
  // "CollectedHeap" can use in the implementation of its virtual
  // functions.
  // This performs a concurrent marking of the live objects in a
  // bitmap off to the side.
  void doConcurrentMark();

  bool isMarkedPrev(oop obj) const;
  bool isMarkedNext(oop obj) const;

  // Determine if an object is dead, given the object and also
  // the region to which the object belongs. An object is dead
  // iff a) it was not allocated since the last mark, b) it
  // is not marked, and c) it is not in an archive region.
  bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
    return
      !hr->obj_allocated_since_prev_marking(obj) &&
      !isMarkedPrev(obj) &&
      !hr->is_archive();
  }

  // This function returns true when an object has been around since the
  // previous marking, has not yet been marked during this marking, and
  // is not in an archive region.
  bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
    return
      !hr->obj_allocated_since_next_marking(obj) &&
      !isMarkedNext(obj) &&
      !hr->is_archive();
  }
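  //
  // Usage sketch for the two predicates (illustrative): verification code
  // typically asks the prev-bitmap question, e.g.
  //
  //   HeapRegion* hr = heap_region_containing(obj);
  //   if (is_obj_dead(obj, hr)) { ... }  // dead w.r.t. the previous marking
  //
  // while is_obj_ill() asks the same question against the next bitmap.
  //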

  // Determine if an object is dead, given only the object itself.
  // This will find the region to which the object belongs and
  // then call the region version of the same function.

  // Note: if the object is NULL, it is not considered dead.

  inline bool is_obj_dead(const oop obj) const;

  inline bool is_obj_ill(const oop obj) const;

  bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
  HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
  bool is_marked(oop obj, VerifyOption vo);
  const char* top_at_mark_start_str(VerifyOption vo);

  ConcurrentMark* concurrent_mark() const { return _cm; }

  // Refinement