src/hotspot/share/gc/g1/g1CollectedHeap.hpp

 531 
 532   // Process any reference objects discovered.
 533   void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 534 
 535   // During an initial mark pause we may install a pending list head which is not
 536   // otherwise reachable; ensure that it is marked in the bitmap for concurrent
 537   // marking to discover.
 538   void make_pending_list_reachable();
 539 
 540   // Merges the information gathered on a per-thread basis for all worker threads
 541   // during GC into global variables.
 542   void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
 543 
 544   void verify_numa_regions(const char* desc);
 545 
 546 public:
 547   G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
 548 
 549   WorkGang* workers() const { return _workers; }
 550 
 551   // Runs the given AbstractGangTask with the current active workers, returning the
 552   // total time taken.
 553   Tickspan run_task(AbstractGangTask* task);



 554 
 555   G1Allocator* allocator() {
 556     return _allocator;
 557   }
 558 
 559   G1HeapVerifier* verifier() {
 560     return _verifier;
 561   }
 562 
 563   G1MonitoringSupport* g1mm() {
 564     assert(_g1mm != NULL, "should have been initialized");
 565     return _g1mm;
 566   }
 567 
 568   void resize_heap_if_necessary();
 569 
 570   G1NUMA* numa() const { return _numa; }
 571 
 572   // Expand the garbage-first heap by at least the given size (in bytes!).
 573   // Returns true if the heap was expanded by the requested amount;


1150   MemRegion reserved_region() const {
1151     return _reserved;
1152   }
1153 
1154   HeapWord* base() const {
1155     return _reserved.start();
1156   }
1157 
1158   bool is_in_reserved(const void* addr) const {
1159     return _reserved.contains(addr);
1160   }
1161 
1162   G1HotCardCache* hot_card_cache() const { return _hot_card_cache; }
1163 
1164   G1CardTable* card_table() const {
1165     return _card_table;
1166   }
1167 
1168   // Iteration functions.
1169 


1170   // Iterate over all objects, calling "cl.do_object" on each.
1171   virtual void object_iterate(ObjectClosure* cl);


1172 
1173   // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1174   virtual void keep_alive(oop obj);
1175 
1176   // Iterate over heap regions, in address order, terminating the
1177   // iteration early if the "do_heap_region" method returns "true".
1178   void heap_region_iterate(HeapRegionClosure* blk) const;
1179 
1180   // Return the region with the given index. It assumes the index is valid.
1181   inline HeapRegion* region_at(uint index) const;
1182   inline HeapRegion* region_at_or_null(uint index) const;
1183 
1184   // Return the next region (by index) that is part of the same
1185   // humongous object that hr is part of.
1186   inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
1187 
1188   // Calculate the region index of the given address. Given address must be
1189   // within the heap.
1190   inline uint addr_to_region(HeapWord* addr) const;
1191 




 531 
 532   // Process any reference objects discovered.
 533   void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 534 
 535   // During an initial mark pause we may install a pending list head which is not
 536   // otherwise reachable; ensure that it is marked in the bitmap for concurrent
 537   // marking to discover.
 538   void make_pending_list_reachable();
 539 
 540   // Merges the information gathered on a per-thread basis for all worker threads
 541   // during GC into global variables.
 542   void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
 543 
 544   void verify_numa_regions(const char* desc);
 545 
 546 public:
 547   G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
 548 
 549   WorkGang* workers() const { return _workers; }
 550 
 551   // Runs the given AbstractGangTask with the current active workers.
 552   virtual void run_task(AbstractGangTask* task);
 553 
 554   // Runs the given AbstractGangTask with the current active workers,
 555   // returning the total time taken.
 556   Tickspan run_task_timed(AbstractGangTask* task);
 557 
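A minimal usage sketch for the split above. SampleGangTask and the two helper functions are illustrative names, not part of this file; the sketch assumes only AbstractGangTask's name-taking constructor and its work(uint worker_id) override point.

  #include "gc/g1/g1CollectedHeap.hpp"
  #include "gc/shared/workgroup.hpp"

  // Trivial gang task: each active worker invokes work() exactly once.
  class SampleGangTask : public AbstractGangTask {
  public:
    SampleGangTask() : AbstractGangTask("Sample Gang Task") { }
    void work(uint worker_id) {
      // Per-worker unit of work goes here.
    }
  };

  void run_sample_task() {
    SampleGangTask task;
    // Fire-and-forget dispatch on the current active workers.
    G1CollectedHeap::heap()->run_task(&task);
  }

  Tickspan run_sample_task_timed() {
    SampleGangTask task;
    // Same dispatch, but the total wall time comes back as a Tickspan,
    // which a caller could fold into G1's phase-time bookkeeping.
    return G1CollectedHeap::heap()->run_task_timed(&task);
  }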
 558   G1Allocator* allocator() {
 559     return _allocator;
 560   }
 561 
 562   G1HeapVerifier* verifier() {
 563     return _verifier;
 564   }
 565 
 566   G1MonitoringSupport* g1mm() {
 567     assert(_g1mm != NULL, "should have been initialized");
 568     return _g1mm;
 569   }
 570 
 571   void resize_heap_if_necessary();
 572 
 573   G1NUMA* numa() const { return _numa; }
 574 
 575   // Expand the garbage-first heap by at least the given size (in bytes!).
 576   // Returns true if the heap was expanded by the requested amount;


1153   MemRegion reserved_region() const {
1154     return _reserved;
1155   }
1156 
1157   HeapWord* base() const {
1158     return _reserved.start();
1159   }
1160 
1161   bool is_in_reserved(const void* addr) const {
1162     return _reserved.contains(addr);
1163   }
1164 
1165   G1HotCardCache* hot_card_cache() const { return _hot_card_cache; }
1166 
1167   G1CardTable* card_table() const {
1168     return _card_table;
1169   }
1170 
1171   // Iteration functions.
1172 
1173   void object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer);
1174 
1175   // Iterate over all objects, calling "cl.do_object" on each.
1176   virtual void object_iterate(ObjectClosure* cl);
1177 
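For reference, a minimal ObjectClosure that could be handed to object_iterate(); CountObjectsClosure is an illustrative name, not part of this file.

  // Counts every object in the heap via the single-threaded entry point.
  class CountObjectsClosure : public ObjectClosure {
    size_t _count;
  public:
    CountObjectsClosure() : _count(0) { }
    void do_object(oop obj) { _count++; }
    size_t count() const { return _count; }
  };

  // Usage:
  //   CountObjectsClosure cl;
  //   G1CollectedHeap::heap()->object_iterate(&cl);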
1178   virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num);
1179 
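A sketch of how the two new parallel entry points could cooperate. The class name and members are illustrative; it assumes ParallelObjectIterator declares object_iterate(ObjectClosure*, uint worker_id) and that HeapRegionClaimer is constructed with the worker count.

  // Each worker claims disjoint regions through the shared HeapRegionClaimer,
  // then walks the objects in the regions it managed to claim.
  class G1ParallelObjectIteratorSketch : public ParallelObjectIterator {
    G1CollectedHeap*  _heap;
    HeapRegionClaimer _claimer;
  public:
    G1ParallelObjectIteratorSketch(uint thread_num) :
      _heap(G1CollectedHeap::heap()),
      _claimer(thread_num) { }

    void object_iterate(ObjectClosure* cl, uint worker_id) {
      _heap->object_iterate_parallel(cl, worker_id, &_claimer);
    }
  };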
1180   // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1181   virtual void keep_alive(oop obj);
1182 
1183   // Iterate over heap regions, in address order, terminating the
1184   // iteration early if the "do_heap_region" method returns "true".
1185   void heap_region_iterate(HeapRegionClosure* blk) const;
1186 
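A minimal HeapRegionClosure illustrating the early-termination contract (returning true from do_heap_region() stops the iteration); FindFirstHumongousClosure is an illustrative name.

  class FindFirstHumongousClosure : public HeapRegionClosure {
    HeapRegion* _found;
  public:
    FindFirstHumongousClosure() : _found(NULL) { }
    bool do_heap_region(HeapRegion* r) {
      if (r->is_humongous()) {
        _found = r;
        return true;   // terminate heap_region_iterate() early
      }
      return false;    // continue with the next region
    }
    HeapRegion* found() const { return _found; }
  };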
1187   // Return the region with the given index. It assumes the index is valid.
1188   inline HeapRegion* region_at(uint index) const;
1189   inline HeapRegion* region_at_or_null(uint index) const;
1190 
1191   // Return the next region (by index) that is part of the same
1192   // humongous object that hr is part of.
1193   inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
1194 
1195   // Calculate the region index of the given address. Given address must be
1196   // within the heap.
1197   inline uint addr_to_region(HeapWord* addr) const;
1198 
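A small sketch tying the last two accessors together; region_for_address is an illustrative helper, and the address must satisfy the in-heap precondition stated above.

  // Maps an address inside the heap to the HeapRegion covering it.
  HeapRegion* region_for_address(G1CollectedHeap* g1h, HeapWord* addr) {
    uint index = g1h->addr_to_region(addr);
    return g1h->region_at(index);
  }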