// NOTE(review): numbered "before" pane of a side-by-side diff of a G1 collected-heap
// C++ header; the leading integer on each line is the original file's line number,
// not code. Do not edit the embedded numbers — they are part of the diff record.
533 void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
534
535 // If during an initial mark pause we may install a pending list head which is not
536 // otherwise reachable, ensure that it is marked in the bitmap for concurrent marking
537 // to discover.
538 void make_pending_list_reachable();
539
540 // Merges the information gathered on a per-thread basis for all worker threads
541 // during GC into global variables.
542 void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
543
544 void verify_numa_regions(const char* desc);
545
546 public:
547 G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
548
549 WorkGang* workers() const { return _workers; }
550
551 // Runs the given AbstractGangTask with the current active workers, returning the
552 // total time taken.
553 Tickspan run_task(AbstractGangTask* task);
554
555 G1Allocator* allocator() {
556 return _allocator;
557 }
558
559 G1HeapVerifier* verifier() {
560 return _verifier;
561 }
562
563 G1MonitoringSupport* g1mm() {
564 assert(_g1mm != NULL, "should have been initialized");
565 return _g1mm;
566 }
567
568 void resize_heap_if_necessary();
569
570 G1NUMA* numa() const { return _numa; }
571
572 // Expand the garbage-first heap by at least the given size (in bytes!).
573 // Returns true if the heap was expanded by the requested amount;
// NOTE(review): original lines 574-1149 are elided from this view (the embedded
// numbering jumps from 573 to 1150), which is why the comment above is cut off
// mid-sentence here.
1150 MemRegion reserved_region() const {
1151 return _reserved;
1152 }
1153
1154 HeapWord* base() const {
1155 return _reserved.start();
1156 }
1157
1158 bool is_in_reserved(const void* addr) const {
1159 return _reserved.contains(addr);
1160 }
1161
1162 G1HotCardCache* hot_card_cache() const { return _hot_card_cache; }
1163
1164 G1CardTable* card_table() const {
1165 return _card_table;
1166 }
1167
1168 // Iteration functions.
1169
1170 // Iterate over all objects, calling "cl.do_object" on each.
1171 virtual void object_iterate(ObjectClosure* cl);
1172
1173 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1174 virtual void keep_alive(oop obj);
1175
1176 // Iterate over heap regions, in address order, terminating the
1177 // iteration early if the "do_heap_region" method returns "true".
1178 void heap_region_iterate(HeapRegionClosure* blk) const;
1179
1180 // Return the region with the given index. It assumes the index is valid.
1181 inline HeapRegion* region_at(uint index) const;
1182 inline HeapRegion* region_at_or_null(uint index) const;
1183
1184 // Return the next region (by index) that is part of the same
1185 // humongous object that hr is part of.
1186 inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
1187
1188 // Calculate the region index of the given address. Given address must be
1189 // within the heap.
1190 inline uint addr_to_region(HeapWord* addr) const;
1191
|
// NOTE(review): numbered "after" pane of the same side-by-side diff. Changes visible
// in SOURCE relative to the left pane: run_task() gains `virtual`, and two new
// declarations appear under "Iteration functions" (object_iterate_parallel and
// parallel_object_iterator). The leading integer on each line is the original
// file's line number, not code.
533 void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
534
535 // If during an initial mark pause we may install a pending list head which is not
536 // otherwise reachable, ensure that it is marked in the bitmap for concurrent marking
537 // to discover.
538 void make_pending_list_reachable();
539
540 // Merges the information gathered on a per-thread basis for all worker threads
541 // during GC into global variables.
542 void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
543
544 void verify_numa_regions(const char* desc);
545
546 public:
547 G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
548
549 WorkGang* workers() const { return _workers; }
550
551 // Runs the given AbstractGangTask with the current active workers, returning the
552 // total time taken.
// NOTE(review): declared virtual in this revision (non-virtual in the left pane) —
// presumably to allow overriding in a subclass or test double; confirm intent
// against the change that introduced it.
553 virtual Tickspan run_task(AbstractGangTask* task);
554
555 G1Allocator* allocator() {
556 return _allocator;
557 }
558
559 G1HeapVerifier* verifier() {
560 return _verifier;
561 }
562
563 G1MonitoringSupport* g1mm() {
564 assert(_g1mm != NULL, "should have been initialized");
565 return _g1mm;
566 }
567
568 void resize_heap_if_necessary();
569
570 G1NUMA* numa() const { return _numa; }
571
572 // Expand the garbage-first heap by at least the given size (in bytes!).
573 // Returns true if the heap was expanded by the requested amount;
// NOTE(review): original lines 574-1149 are elided from this view (the embedded
// numbering jumps from 573 to 1150), which is why the comment above is cut off
// mid-sentence here.
1150 MemRegion reserved_region() const {
1151 return _reserved;
1152 }
1153
1154 HeapWord* base() const {
1155 return _reserved.start();
1156 }
1157
1158 bool is_in_reserved(const void* addr) const {
1159 return _reserved.contains(addr);
1160 }
1161
1162 G1HotCardCache* hot_card_cache() const { return _hot_card_cache; }
1163
1164 G1CardTable* card_table() const {
1165 return _card_table;
1166 }
1167
1168 // Iteration functions.
1169
// NOTE(review): added in this revision — per-worker object iteration; the
// HeapRegionClaimer parameter presumably lets each worker claim regions so that
// every region is visited exactly once across workers. Confirm against the .cpp.
1170 void object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer);
1171
1172 // Iterate over all objects, calling "cl.do_object" on each.
1173 virtual void object_iterate(ObjectClosure* cl);
1174
// NOTE(review): added in this revision — factory for a parallel object iterator
// sized for thread_num threads; it is virtual, so presumably it overrides a hook
// on the CollectedHeap base class. Confirm the base declaration.
1175 virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num);
1176
1177 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1178 virtual void keep_alive(oop obj);
1179
1180 // Iterate over heap regions, in address order, terminating the
1181 // iteration early if the "do_heap_region" method returns "true".
1182 void heap_region_iterate(HeapRegionClosure* blk) const;
1183
1184 // Return the region with the given index. It assumes the index is valid.
1185 inline HeapRegion* region_at(uint index) const;
1186 inline HeapRegion* region_at_or_null(uint index) const;
1187
1188 // Return the next region (by index) that is part of the same
1189 // humongous object that hr is part of.
1190 inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
1191
1192 // Calculate the region index of the given address. Given address must be
1193 // within the heap.
1194 inline uint addr_to_region(HeapWord* addr) const;
1195
|