< prev index next >

src/hotspot/share/gc/g1/g1CollectedHeap.hpp

Print this page




 532 
 533   // Process any reference objects discovered.
 534   void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 535 
 536   // If during a concurrent start pause we may install a pending list head which is not
 537   // otherwise reachable, ensure that it is marked in the bitmap for concurrent marking
 538   // to discover.
 539   void make_pending_list_reachable();
 540 
 541   // Merges the information gathered on a per-thread basis for all worker threads
 542   // during GC into global variables.
 543   void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
 544 
 545   void verify_numa_regions(const char* desc);
 546 
 547 public:
       // Accessor for the young generation remembered set sampling thread.
 548   G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
 549 
       // The WorkGang used to run (parallel) tasks.
 550   WorkGang* workers() const { return _workers; }
 551 
 552   // Runs the given AbstractGangTask with the current active workers, returning the
 553   // total time taken.
 554   Tickspan run_task(AbstractGangTask* task);



 555 
       // Accessor for the heap's G1Allocator.
 556   G1Allocator* allocator() {
 557     return _allocator;
 558   }
 559 
       // Accessor for the heap verifier.
 560   G1HeapVerifier* verifier() {
 561     return _verifier;
 562   }
 563 
       // Monitoring support; must have been initialized before use (asserted).
 564   G1MonitoringSupport* g1mm() {
 565     assert(_g1mm != NULL, "should have been initialized");
 566     return _g1mm;
 567   }
 568 
 569   void resize_heap_if_necessary();
 570 
       // Accessor for the NUMA support of this heap.
 571   G1NUMA* numa() const { return _numa; }
 572 
 573   // Expand the garbage-first heap by at least the given size (in bytes!).
 574   // Returns true if the heap was expanded by the requested amount;


1151   MemRegion reserved_region() const {
1152     return _reserved;
1153   }
1154 
       // Start (lowest) address of the reserved region.
1155   HeapWord* base() const {
1156     return _reserved.start();
1157   }
1158 
       // Returns true iff addr lies within the reserved region.
1159   bool is_in_reserved(const void* addr) const {
1160     return _reserved.contains(addr);
1161   }
1162 
       // Accessor for the hot card cache.
1163   G1HotCardCache* hot_card_cache() const { return _hot_card_cache; }
1164 
       // Accessor for the G1 card table.
1165   G1CardTable* card_table() const {
1166     return _card_table;
1167   }
1168 
1169   // Iteration functions.
1170 


1171   // Iterate over all objects, calling "cl.do_object" on each.
1172   virtual void object_iterate(ObjectClosure* cl);
1173 


1174   // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1175   virtual void keep_alive(oop obj);
1176 
1177   // Iterate over heap regions, in address order, terminating the
1178   // iteration early if the "do_heap_region" method returns "true".
1179   void heap_region_iterate(HeapRegionClosure* blk) const;
1180 
1181   // Return the region with the given index. It assumes the index is valid.
1182   inline HeapRegion* region_at(uint index) const;
1183   inline HeapRegion* region_at_or_null(uint index) const;
1184 
1185   // Return the next region (by index) that is part of the same
1186   // humongous object that hr is part of.
1187   inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
1188 
1189   // Calculate the region index of the given address. Given address must be
1190   // within the heap.
1191   inline uint addr_to_region(HeapWord* addr) const;
1192 
       // Bottom (start) address of the region with the given index.
1193   inline HeapWord* bottom_addr_for_region(uint index) const;




 532 
 533   // Process any reference objects discovered.
 534   void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 535 
 536   // If during a concurrent start pause we may install a pending list head which is not
 537   // otherwise reachable, ensure that it is marked in the bitmap for concurrent marking
 538   // to discover.
 539   void make_pending_list_reachable();
 540 
 541   // Merges the information gathered on a per-thread basis for all worker threads
 542   // during GC into global variables.
 543   void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
 544 
 545   void verify_numa_regions(const char* desc);
 546 
 547 public:
       // Accessor for the young generation remembered set sampling thread.
 548   G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
 549 
       // The WorkGang used to run (parallel) tasks.
 550   WorkGang* workers() const { return _workers; }
 551 
 552   // Runs the given AbstractGangTask with the current active workers.
 553   virtual void run_task(AbstractGangTask* task);
 554 
 555   // Runs the given AbstractGangTask with the current active workers,
 556   // returning the total time taken.
 557   Tickspan run_task_timed(AbstractGangTask* task);
 558 
       // Accessor for the heap's G1Allocator.
 559   G1Allocator* allocator() {
 560     return _allocator;
 561   }
 562 
       // Accessor for the heap verifier.
 563   G1HeapVerifier* verifier() {
 564     return _verifier;
 565   }
 566 
       // Monitoring support; must have been initialized before use (asserted).
 567   G1MonitoringSupport* g1mm() {
 568     assert(_g1mm != NULL, "should have been initialized");
 569     return _g1mm;
 570   }
 571 
 572   void resize_heap_if_necessary();
 573 
       // Accessor for the NUMA support of this heap.
 574   G1NUMA* numa() const { return _numa; }
 575 
 576   // Expand the garbage-first heap by at least the given size (in bytes!).
 577   // Returns true if the heap was expanded by the requested amount;


1154   MemRegion reserved_region() const {
1155     return _reserved;
1156   }
1157 
       // Start (lowest) address of the reserved region.
1158   HeapWord* base() const {
1159     return _reserved.start();
1160   }
1161 
       // Returns true iff addr lies within the reserved region.
1162   bool is_in_reserved(const void* addr) const {
1163     return _reserved.contains(addr);
1164   }
1165 
       // Accessor for the hot card cache.
1166   G1HotCardCache* hot_card_cache() const { return _hot_card_cache; }
1167 
       // Accessor for the G1 card table.
1168   G1CardTable* card_table() const {
1169     return _card_table;
1170   }
1171 
1172   // Iteration functions.
1173 
       // Per-worker object iteration; regions are claimed via the given
       // claimer (implementation not visible here).
1174   void object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer);
1175 
1176   // Iterate over all objects, calling "cl.do_object" on each.
1177   virtual void object_iterate(ObjectClosure* cl);
1178 
       // Factory for an object iterator supporting thread_num parallel threads.
1179   virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num);
1180 
1181   // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1182   virtual void keep_alive(oop obj);
1183 
1184   // Iterate over heap regions, in address order, terminating the
1185   // iteration early if the "do_heap_region" method returns "true".
1186   void heap_region_iterate(HeapRegionClosure* blk) const;
1187 
1188   // Return the region with the given index. It assumes the index is valid.
1189   inline HeapRegion* region_at(uint index) const;
1190   inline HeapRegion* region_at_or_null(uint index) const;
1191 
1192   // Return the next region (by index) that is part of the same
1193   // humongous object that hr is part of.
1194   inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
1195 
1196   // Calculate the region index of the given address. Given address must be
1197   // within the heap.
1198   inline uint addr_to_region(HeapWord* addr) const;
1199 
       // Bottom (start) address of the region with the given index.
1200   inline HeapWord* bottom_addr_for_region(uint index) const;


< prev index next >