
src/hotspot/share/gc/g1/g1CollectedHeap.hpp

rev 60486 : webrev 10


 534 
 535   // Process any reference objects discovered.
 536   void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 537 
 538   // During a concurrent start pause we may install a pending list head which is not
 539   // otherwise reachable; ensure that it is marked in the bitmap for concurrent marking
 540   // to discover.
 541   void make_pending_list_reachable();
 542 
 543   // Merges the information gathered on a per-thread basis for all worker threads
 544   // during GC into global variables.
 545   void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
 546 
 547   void verify_numa_regions(const char* desc);
 548 
 549 public:
 550   G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
 551 
 552   WorkGang* workers() const { return _workers; }
 553 
 554   // Runs the given AbstractGangTask with the current active workers, returning the
 555   // total time taken.
 556   Tickspan run_task(AbstractGangTask* task);



 557 
 558   G1Allocator* allocator() {
 559     return _allocator;
 560   }
 561 
 562   G1HeapVerifier* verifier() {
 563     return _verifier;
 564   }
 565 
 566   G1MonitoringSupport* g1mm() {
 567     assert(_g1mm != NULL, "should have been initialized");
 568     return _g1mm;
 569   }
 570 
 571   void resize_heap_if_necessary();
 572 
 573   G1NUMA* numa() const { return _numa; }
 574 
 575   // Expand the garbage-first heap by at least the given size (in bytes!).
 576   // Returns true if the heap was expanded by the requested amount;


1156   MemRegion reserved_region() const {
1157     return _reserved;
1158   }
1159 
1160   HeapWord* base() const {
1161     return _reserved.start();
1162   }
1163 
1164   bool is_in_reserved(const void* addr) const {
1165     return _reserved.contains(addr);
1166   }
1167 
1168   G1HotCardCache* hot_card_cache() const { return _hot_card_cache; }
1169 
1170   G1CardTable* card_table() const {
1171     return _card_table;
1172   }
1173 
1174   // Iteration functions.
1175 


1176   // Iterate over all objects, calling "cl.do_object" on each.
1177   virtual void object_iterate(ObjectClosure* cl);


1178 
1179   // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1180   virtual void keep_alive(oop obj);
1181 
1182   // Iterate over heap regions, in address order, terminating the
1183   // iteration early if the "do_heap_region" method returns "true".
1184   void heap_region_iterate(HeapRegionClosure* blk) const;
1185 
1186   // Return the region with the given index. It assumes the index is valid.
1187   inline HeapRegion* region_at(uint index) const;
1188   inline HeapRegion* region_at_or_null(uint index) const;
1189 
1190   // Return the next region (by index) that is part of the same
1191   // humongous object that hr is part of.
1192   inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
1193 
1194   // Calculate the region index of the given address. Given address must be
1195   // within the heap.
1196   inline uint addr_to_region(HeapWord* addr) const;
1197 




 534 
 535   // Process any reference objects discovered.
 536   void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 537 
 538   // During a concurrent start pause we may install a pending list head which is not
 539   // otherwise reachable; ensure that it is marked in the bitmap for concurrent marking
 540   // to discover.
 541   void make_pending_list_reachable();
 542 
 543   // Merges the information gathered on a per-thread basis for all worker threads
 544   // during GC into global variables.
 545   void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
 546 
 547   void verify_numa_regions(const char* desc);
 548 
 549 public:
 550   G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
 551 
 552   WorkGang* workers() const { return _workers; }
 553 
 554   // Runs the given AbstractGangTask with the current active workers.
 555   virtual void run_task(AbstractGangTask* task);
 556 
 557   // Runs the given AbstractGangTask with the current active workers,
 558   // returning the total time taken.
 559   Tickspan run_task_timed(AbstractGangTask* task);
 560 
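As context for reviewers, here is a minimal sketch of how the split run_task()/run_task_timed() API might be used. The task class, its name string, and the log line are hypothetical and not part of this patch; the sketch only assumes HotSpot's AbstractGangTask and Tickspan APIs.

// Hypothetical illustration only -- not part of this change.
class HypotheticalCleanupTask : public AbstractGangTask {
public:
  HypotheticalCleanupTask() : AbstractGangTask("Hypothetical Cleanup") { }
  void work(uint worker_id) {
    // Per-worker work would go here.
  }
};

void run_examples(G1CollectedHeap* g1h) {
  HypotheticalCleanupTask task;
  // Untimed variant: just dispatch on the currently active workers.
  g1h->run_task(&task);
  // Timed variant: the caller gets the total wall time back,
  // e.g. to feed into phase timing or logging.
  Tickspan elapsed = g1h->run_task_timed(&task);
  log_debug(gc)("Hypothetical cleanup took %.2fms", elapsed.seconds() * 1000.0);
}
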
 561   G1Allocator* allocator() {
 562     return _allocator;
 563   }
 564 
 565   G1HeapVerifier* verifier() {
 566     return _verifier;
 567   }
 568 
 569   G1MonitoringSupport* g1mm() {
 570     assert(_g1mm != NULL, "should have been initialized");
 571     return _g1mm;
 572   }
 573 
 574   void resize_heap_if_necessary();
 575 
 576   G1NUMA* numa() const { return _numa; }
 577 
 578   // Expand the garbage-first heap by at least the given size (in bytes!).
 579   // Returns true if the heap was expanded by the requested amount;


1159   MemRegion reserved_region() const {
1160     return _reserved;
1161   }
1162 
1163   HeapWord* base() const {
1164     return _reserved.start();
1165   }
1166 
1167   bool is_in_reserved(const void* addr) const {
1168     return _reserved.contains(addr);
1169   }
1170 
1171   G1HotCardCache* hot_card_cache() const { return _hot_card_cache; }
1172 
1173   G1CardTable* card_table() const {
1174     return _card_table;
1175   }
1176 
1177   // Iteration functions.
1178 
1179   void object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer);
1180 
1181   // Iterate over all objects, calling "cl.do_object" on each.
1182   virtual void object_iterate(ObjectClosure* cl);
1183 
1184   virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num);
1185 
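To illustrate how the new parallel iteration entry points fit together, here is a sketch under stated assumptions: the closure and gang task below are made up for illustration, and ParallelObjectIterator::object_iterate(ObjectClosure*, uint) is taken to be the CollectedHeap-level interface this override serves.

// Hypothetical closure, for illustration only.
class CountObjectsClosure : public ObjectClosure {
  size_t _count;
public:
  CountObjectsClosure() : _count(0) { }
  void do_object(oop obj) { _count++; }
  size_t count() const { return _count; }
};

// Hypothetical gang task: every worker drains objects through the shared
// ParallelObjectIterator; the G1 implementation is expected to hand out
// regions via a HeapRegionClaimer so each region is visited exactly once.
class HypotheticalHeapWalkTask : public AbstractGangTask {
  ParallelObjectIterator* _poi;
public:
  HypotheticalHeapWalkTask(ParallelObjectIterator* poi)
    : AbstractGangTask("Hypothetical Heap Walk"), _poi(poi) { }
  void work(uint worker_id) {
    CountObjectsClosure cl;
    _poi->object_iterate(&cl, worker_id);
    // A real task would accumulate cl.count() into a shared result here.
  }
};

A caller such as heap inspection would then, roughly, obtain the iterator with parallel_object_iterator(num_workers), dispatch the task on the work gang (for example via run_task()), and free the iterator afterwards.
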
1186   // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1187   virtual void keep_alive(oop obj);
1188 
1189   // Iterate over heap regions, in address order, terminating the
1190   // iteration early if the "do_heap_region" method returns "true".
1191   void heap_region_iterate(HeapRegionClosure* blk) const;
1192 
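For reference, a minimal HeapRegionClosure sketch showing the early-termination contract described in the comment above; the closure itself is hypothetical.

// Hypothetical closure, for illustration: find the first humongous region.
class FindFirstHumongousClosure : public HeapRegionClosure {
  HeapRegion* _result;
public:
  FindFirstHumongousClosure() : _result(NULL) { }
  bool do_heap_region(HeapRegion* r) {
    if (r->is_humongous()) {
      _result = r;
      return true;   // true terminates the iteration early
    }
    return false;    // false continues with the next region
  }
  HeapRegion* result() const { return _result; }
};

// Usage: FindFirstHumongousClosure cl; g1h->heap_region_iterate(&cl);
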
1193   // Return the region with the given index. It assumes the index is valid.
1194   inline HeapRegion* region_at(uint index) const;
1195   inline HeapRegion* region_at_or_null(uint index) const;
1196 
1197   // Return the next region (by index) that is part of the same
1198   // humongous object that hr is part of.
1199   inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
1200 
1201   // Calculate the region index of the given address. Given address must be
1202   // within the heap.
1203   inline uint addr_to_region(HeapWord* addr) const;
1204 
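Finally, a small sketch tying addr_to_region() and region_at() together. The helper is hypothetical (G1 already exposes heap_region_containing() for this purpose); it is only meant to make the stated preconditions concrete.

// Hypothetical helper, for illustration only: map a heap address to its
// HeapRegion by going through the region index.
static HeapRegion* region_for_addr(G1CollectedHeap* g1h, HeapWord* addr) {
  assert(g1h->is_in_reserved(addr), "address must be within the heap");
  uint index = g1h->addr_to_region(addr);  // region index covering addr
  return g1h->region_at(index);            // precondition: index is valid
}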

