src/share/vm/gc_implementation/g1/heapRegion.hpp

--- old/src/share/vm/gc_implementation/g1/heapRegion.hpp
 189 
 190   HeapWord* block_start(const void* p);
 191   HeapWord* block_start_const(const void* p) const;
 192 
 193   void prepare_for_compaction(CompactPoint* cp);
 194 
 195   // Allocation routines that also update the offset table.
 196   virtual HeapWord* allocate(size_t word_size);
 197   HeapWord* par_allocate(size_t word_size);
 198 
 199   // MarkSweep support (phase 3)
 200   virtual HeapWord* initialize_threshold();
 201   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
 202 
 203   virtual void print() const;
 204 
 205   void reset_bot() {
 206     _offsets.reset_bot();
 207   }
 208 
 209   void update_bot_for_object(HeapWord* start, size_t word_size) {
 210     _offsets.alloc_block(start, word_size);
 211   }
 212 
 213   void print_bot_on(outputStream* out) {
 214     _offsets.print_on(out);
 215   }
 216 };
 217 
 218 class HeapRegion: public G1OffsetTableContigSpace {
 219   friend class VMStructs;
 220  private:
 221 
 222   enum HumongousType {
 223     NotHumongous = 0,
 224     StartsHumongous,
 225     ContinuesHumongous
 226   };
 227 
 228   // The remembered set for this region.
 229   // (Might want to make this "inline" later, to avoid some alloc failure
 230   // issues.)
 231   HeapRegionRemSet* _rem_set;
 232 
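
As context for the BOT helpers above (reset_bot(), update_bot_for_object(), block_start()): the block offset table lets a region map an arbitrary interior address back to the start of the block (object) covering it. The standalone toy sketch below models only that query contract; every name in it is illustrative, and it is not HotSpot code.

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <vector>

// Toy block offset table: records block starts in address order and
// answers block_start(p) queries, mirroring the contract of the
// _offsets calls in the header above.
class ToyBOT {
  std::vector<const char*> _starts;        // sorted block start addresses
 public:
  void reset() { _starts.clear(); }        // cf. reset_bot()

  // cf. update_bot_for_object(start, word_size): record a new block.
  void alloc_block(const char* start) {
    assert(_starts.empty() || start > _starts.back());
    _starts.push_back(start);
  }

  // cf. block_start(p): start of the block containing p, or NULL if p
  // precedes every recorded block.
  const char* block_start(const char* p) const {
    std::vector<const char*>::const_iterator it =
        std::upper_bound(_starts.begin(), _starts.end(), p);
    return it == _starts.begin() ? NULL : *(it - 1);
  }
};

int main() {
  char region[64];
  ToyBOT bot;
  bot.alloc_block(region);                 // "object" A at offset 0
  bot.alloc_block(region + 24);            // "object" B at offset 24
  assert(bot.block_start(region + 30) == region + 24);  // inside B
  assert(bot.block_start(region + 10) == region);       // inside A
  std::printf("ok\n");
  return 0;
}

The real _offsets object is a card-indexed offset array rather than a growing vector; only the lookup contract is modeled here.
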


 719   // Apply "cl->do_object" to all objects that intersect with "mr".
 720   // If the iteration encounters an unparseable portion of the region,
 721   // or if "cl->abort()" is true after a closure application,
 722   // terminate the iteration and return the address of the start of the
 723   // subregion that isn't done.  (The two cases can be distinguished by
 724   // querying "cl->abort()".)  A return value of NULL indicates that
 725   // the iteration completed.
 726   HeapWord*
 727   object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
 728 
 729   // filter_young: if true and the region is a young region, we skip
 730   // the iteration.
 731   // card_ptr: if not NULL, and we decide that the card is not young
 732   // and we do iterate over it, we clean the card before we start
 733   // the iteration.
 734   HeapWord*
 735   oops_on_card_seq_iterate_careful(MemRegion mr,
 736                                    FilterOutOfRegionClosure* cl,
 737                                    bool filter_young,
 738                                    jbyte* card_ptr);
 739 
 740   // A version of block start that is guaranteed to find *some* block
 741   // boundary at or before "p", but performs no object iteration, and may
 742   // therefore be used safely when the heap is unparseable.
 743   HeapWord* block_start_careful(const void* p) const {
 744     return _offsets.block_start_careful(p);
 745   }
 746 
 747   // Requires that "addr" is within the region.  Returns the start of the
 748   // first ("careful") block that starts at or after "addr", or else the
 749   // "end" of the region if there is no such block.
 750   HeapWord* next_block_start_careful(HeapWord* addr);
 751 
 752   size_t recorded_rs_length() const        { return _recorded_rs_length; }
 753   double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
 754   size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }
 755 
 756   void set_recorded_rs_length(size_t rs_length) {
 757     _recorded_rs_length = rs_length;
 758   }
 759 
 760   void set_predicted_elapsed_time_ms(double ms) {
 761     _predicted_elapsed_time_ms = ms;
 762   }
 763 
 764   void set_predicted_bytes_to_copy(size_t bytes) {
 765     _predicted_bytes_to_copy = bytes;
 766   }
 767 
 768   virtual CompactibleSpace* next_compaction_space() const;
 769 
 770   virtual void reset_after_compaction();
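
The contract documented on object_iterate_mem_careful() above (apply the closure to every object intersecting a range; return NULL on completion, or the start of the unfinished portion otherwise, with cl->abort() distinguishing the two causes) can be modeled standalone. The sketch below uses integer "addresses" instead of HeapWord* and invented toy types throughout; it is not HotSpot code.

#include <cstdio>
#include <vector>

// Toy object: occupies the half-open interval [start, start + size).
struct ToyObject { int start; int size; };

// Toy closure that asks to abort once it sees an object starting at or
// past a cutoff address (an arbitrary stand-in for "unparseable from
// here on").
struct ToyClosure {
  int cutoff;
  bool aborted;
  explicit ToyClosure(int c) : cutoff(c), aborted(false) {}
  void do_object(const ToyObject& obj) {
    if (obj.start >= cutoff) aborted = true;
  }
  bool abort() const { return aborted; }
};

// Apply cl->do_object to every object intersecting [from, to).
// Returns -1 when the iteration completed (cf. returning NULL), else the
// start of the object at which iteration stopped (cf. the start of the
// subregion that isn't done).
int iterate_mem_careful(const std::vector<ToyObject>& objs,
                        int from, int to, ToyClosure* cl) {
  for (size_t i = 0; i < objs.size(); i++) {
    const ToyObject& o = objs[i];
    if (o.start + o.size <= from || o.start >= to) continue;  // no overlap
    cl->do_object(o);
    if (cl->abort()) return o.start;
  }
  return -1;
}

int main() {
  std::vector<ToyObject> objs;
  ToyObject a = { 0, 16 }, b = { 16, 32 }, c = { 48, 8 };
  objs.push_back(a); objs.push_back(b); objs.push_back(c);
  ToyClosure cl(40);
  int res = iterate_mem_careful(objs, 8, 56, &cl);
  std::printf("stopped at %d, aborted=%d\n", res, (int)cl.abort());
  return 0;
}
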

+++ new/src/share/vm/gc_implementation/g1/heapRegion.hpp
 189 
 190   HeapWord* block_start(const void* p);
 191   HeapWord* block_start_const(const void* p) const;
 192 
 193   void prepare_for_compaction(CompactPoint* cp);
 194 
 195   // Allocation routines that also update the offset table.
 196   virtual HeapWord* allocate(size_t word_size);
 197   HeapWord* par_allocate(size_t word_size);
 198 
 199   // MarkSweep support (phase 3)
 200   virtual HeapWord* initialize_threshold();
 201   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
 202 
 203   virtual void print() const;
 204 
 205   void reset_bot() {
 206     _offsets.reset_bot();
 207   }
 208 




 209   void print_bot_on(outputStream* out) {
 210     _offsets.print_on(out);
 211   }
 212 };
 213 
 214 class HeapRegion: public G1OffsetTableContigSpace {
 215   friend class VMStructs;
 216  private:
 217 
 218   enum HumongousType {
 219     NotHumongous = 0,
 220     StartsHumongous,
 221     ContinuesHumongous
 222   };
 223 
 224   // The remembered set for this region.
 225   // (Might want to make this "inline" later, to avoid some alloc failure
 226   // issues.)
 227   HeapRegionRemSet* _rem_set;
 228 


 715   // Apply "cl->do_object" to all objects that intersect with "mr".
 716   // If the iteration encounters an unparseable portion of the region,
 717   // or if "cl->abort()" is true after a closure application,
 718   // terminate the iteration and return the address of the start of the
 719   // subregion that isn't done.  (The two cases can be distinguished by
 720   // querying "cl->abort()".)  A return value of NULL indicates that
 721   // the iteration completed.
 722   HeapWord*
 723   object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
 724 
 725   // filter_young: if true and the region is a young region, we skip
 726   // the iteration.
 727   // card_ptr: if not NULL, and we decide that the card is not young
 728   // and we do iterate over it, we clean the card before we start
 729   // the iteration.
 730   HeapWord*
 731   oops_on_card_seq_iterate_careful(MemRegion mr,
 732                                    FilterOutOfRegionClosure* cl,
 733                                    bool filter_young,
 734                                    jbyte* card_ptr);












 735 
 736   size_t recorded_rs_length() const        { return _recorded_rs_length; }
 737   double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
 738   size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }
 739 
 740   void set_recorded_rs_length(size_t rs_length) {
 741     _recorded_rs_length = rs_length;
 742   }
 743 
 744   void set_predicted_elapsed_time_ms(double ms) {
 745     _predicted_elapsed_time_ms = ms;
 746   }
 747 
 748   void set_predicted_bytes_to_copy(size_t bytes) {
 749     _predicted_bytes_to_copy = bytes;
 750   }
 751 
 752   virtual CompactibleSpace* next_compaction_space() const;
 753 
 754   virtual void reset_after_compaction();
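
The recorded_rs_length / predicted_elapsed_time_ms / predicted_bytes_to_copy accessors above cache per-region inputs for pause prediction. One plausible use of such values (hypothetical and heavily simplified here, not G1's actual policy; every name except predicted_elapsed_time_ms is invented for the example) is greedy collection-set selection: sort regions by estimated payoff per predicted millisecond and add them until the pause budget is spent.

#include <algorithm>
#include <cstdio>
#include <vector>

// Toy per-region prediction record (illustrative only, not HotSpot's
// HeapRegion; the middle field mirrors an accessor in the header above).
struct ToyRegion {
  int    id;
  double predicted_elapsed_time_ms;  // predicted cost to evacuate
  double garbage_bytes;              // reclaimable payoff (invented)
};

static bool more_efficient(const ToyRegion& a, const ToyRegion& b) {
  // Higher garbage reclaimed per predicted millisecond sorts first.
  return a.garbage_bytes / a.predicted_elapsed_time_ms >
         b.garbage_bytes / b.predicted_elapsed_time_ms;
}

int main() {
  ToyRegion rs[] = { {0, 2.0, 512.0}, {1, 8.0, 640.0}, {2, 1.0, 400.0} };
  std::vector<ToyRegion> regions(rs, rs + 3);
  std::sort(regions.begin(), regions.end(), more_efficient);

  const double pause_budget_ms = 4.0;   // assumed pause target
  double predicted_ms = 0.0;
  for (size_t i = 0; i < regions.size(); i++) {
    if (predicted_ms + regions[i].predicted_elapsed_time_ms > pause_budget_ms)
      break;                            // budget exhausted
    predicted_ms += regions[i].predicted_elapsed_time_ms;
    std::printf("choose region %d\n", regions[i].id);
  }
  return 0;
}

In the header itself these are just cached fields behind getters and setters; the policy that consumes them lives elsewhere, in G1's collector policy code.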

