
src/share/vm/gc_implementation/g1/heapRegion.hpp

rev 9374 : 8259659: Missing memory fences between memory allocation and refinement
Summary: Refactored to have the needed barrier
Reviewed-by: tschatzl, ehelin


 404 
 405   void zero_marked_bytes()      {
 406     _prev_marked_bytes = _next_marked_bytes = 0;
 407   }
 408 
 409   const char* get_type_str() const { return _type.get_str(); }
 410   const char* get_short_type_str() const { return _type.get_short_str(); }
 411   G1HeapRegionTraceType::Type get_trace_type() { return _type.get_trace_type(); }
 412 
 413   bool is_free() const { return _type.is_free(); }
 414 
 415   bool is_young()    const { return _type.is_young();    }
 416   bool is_eden()     const { return _type.is_eden();     }
 417   bool is_survivor() const { return _type.is_survivor(); }
 418 
 419   bool isHumongous() const { return _type.is_humongous(); }
 420   bool startsHumongous() const { return _type.is_starts_humongous(); }
 421   bool continuesHumongous() const { return _type.is_continues_humongous();   }
 422 
 423   bool is_old() const { return _type.is_old(); }

 424 
 425   // For a humongous region, the region in which it starts.
 426   HeapRegion* humongous_start_region() const {
 427     return _humongous_start_region;
 428   }
 429 
 430   // Return the number of distinct regions that are covered by this region:
 431   // 1 if the region is not humongous, >= 1 if the region is humongous.
 432   uint region_num() const {
 433     if (!isHumongous()) {
 434       return 1U;
 435     } else {
 436       assert(startsHumongous(), "doesn't make sense on HC regions");
 437       assert(capacity() % HeapRegion::GrainBytes == 0, "sanity");
 438       return (uint) (capacity() >> HeapRegion::LogOfHRGrainBytes);
 439     }
 440   }
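
To illustrate the arithmetic above, here is a minimal, self-contained sketch. It assumes a 1 MB region size purely for the example; the real GrainBytes and LogOfHRGrainBytes are computed at VM startup, and the names below are stand-ins, not code from this file.

  #include <assert.h>
  #include <stddef.h>
  #include <stdio.h>

  // Stand-ins for HeapRegion::GrainBytes and HeapRegion::LogOfHRGrainBytes,
  // assuming a 1 MB region size for illustration only.
  static const size_t kGrainBytes      = (size_t)1 << 20;
  static const unsigned kLogGrainBytes = 20;

  // Mirrors the region_num() computation for a starts-humongous region:
  // the number of covered regions is the capacity divided by the region size.
  static unsigned region_num_for(size_t capacity) {
    assert(capacity % kGrainBytes == 0);
    return (unsigned)(capacity >> kLogGrainBytes);
  }

  int main() {
    // A humongous object needing about 2.5 MB occupies three 1 MB regions,
    // so the starts-humongous region's capacity() is 3 MB and region_num()
    // returns 3.
    printf("%u\n", region_num_for(3 * kGrainBytes));  // prints 3
    return 0;
  }
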
 441 
 442   // Return the index + 1 of the last HC region that's associated
 443   // with this HS region.


 701   // Sets the "evacuation_failed" property of the region.
 702   void set_evacuation_failed(bool b) {
 703     _evacuation_failed = b;
 704 
 705     if (b) {
 706       _next_marked_bytes = 0;
 707     }
 708   }
 709 
 710   // Requires that "mr" be entirely within the region.
 711   // Apply "cl->do_object" to all objects that intersect with "mr".
 712   // If the iteration encounters an unparseable portion of the region,
 713   // or if "cl->abort()" is true after a closure application,
 714   // terminate the iteration and return the address of the start of the
 715   // subregion that isn't done.  (The two can be distinguished by querying
 716   // "cl->abort()".)  Return of "NULL" indicates that the iteration
 717   // completed.
 718   HeapWord*
 719   object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
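
A hypothetical caller might interpret the return value as follows. The names 'region', 'mr' and 'scan_cl' are placeholders, and it is assumed, as the comment above states, that the closure type exposes an abort() query; this is a usage sketch, not code from this changeset.

  // Hypothetical usage sketch for object_iterate_mem_careful().
  HeapWord* stop = region->object_iterate_mem_careful(mr, &scan_cl);
  if (stop == NULL) {
    // Every object intersecting mr was visited.
  } else if (scan_cl.abort()) {
    // The closure requested early termination; 'stop' is the start of the
    // subregion that was not processed.
  } else {
    // An unparseable portion of the region begins at 'stop'.
  }
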
 720 
 721   // Iterate over the oops in the card designated by card_ptr,
 722   // applying cl to all references in the region.
 723   // mr: the memory region covered by the card.
 724   // card_ptr: if we decide that the card is not young and we iterate
 725   // over it, we'll clean the card before we start the iteration.
 726   // Returns true if the card was successfully processed, false if an
 727   // unparsable part of the heap was encountered, which should only
 728   // happen when invoked concurrently with the mutator.


 729   bool oops_on_card_seq_iterate_careful(MemRegion mr,
 730                                         FilterOutOfRegionClosure* cl,
 731                                         jbyte* card_ptr);
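
For context, a rough sketch of a pre-patch caller is below. It only assumes the contract described in the comment above (the callee receives card_ptr and, if it decides the card is not young and scans it, cleans the card itself); the function and variable names are placeholders, not code from this changeset.

  // Hypothetical pre-patch caller sketch.
  bool scan_card_old_contract(HeapRegion* r, MemRegion mr,
                              FilterOutOfRegionClosure* filter_cl,
                              jbyte* card_ptr) {
    bool processed = r->oops_on_card_seq_iterate_careful(mr, filter_cl, card_ptr);
    if (!processed) {
      // An unparsable part of the heap was hit, which can only happen when
      // running concurrently with the mutator; the card must be treated as
      // not yet refined and revisited later.
    }
    return processed;
  }
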
 732 
 733   size_t recorded_rs_length() const        { return _recorded_rs_length; }
 734   double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
 735   size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }
 736 
 737   void set_recorded_rs_length(size_t rs_length) {
 738     _recorded_rs_length = rs_length;
 739   }
 740 
 741   void set_predicted_elapsed_time_ms(double ms) {
 742     _predicted_elapsed_time_ms = ms;
 743   }
 744 
 745   void set_predicted_bytes_to_copy(size_t bytes) {
 746     _predicted_bytes_to_copy = bytes;
 747   }
 748 
 749   virtual CompactibleSpace* next_compaction_space() const;
 750 
 751   virtual void reset_after_compaction();




 404 
 405   void zero_marked_bytes()      {
 406     _prev_marked_bytes = _next_marked_bytes = 0;
 407   }
 408 
 409   const char* get_type_str() const { return _type.get_str(); }
 410   const char* get_short_type_str() const { return _type.get_short_str(); }
 411   G1HeapRegionTraceType::Type get_trace_type() { return _type.get_trace_type(); }
 412 
 413   bool is_free() const { return _type.is_free(); }
 414 
 415   bool is_young()    const { return _type.is_young();    }
 416   bool is_eden()     const { return _type.is_eden();     }
 417   bool is_survivor() const { return _type.is_survivor(); }
 418 
 419   bool isHumongous() const { return _type.is_humongous(); }
 420   bool startsHumongous() const { return _type.is_starts_humongous(); }
 421   bool continuesHumongous() const { return _type.is_continues_humongous();   }
 422 
 423   bool is_old() const { return _type.is_old(); }
 424   bool is_old_or_humongous() const { return _type.is_old_or_humongous(); }
 425 
 426   // For a humongous region, the region in which it starts.
 427   HeapRegion* humongous_start_region() const {
 428     return _humongous_start_region;
 429   }
 430 
 431   // Return the number of distinct regions that are covered by this region:
 432   // 1 if the region is not humongous, >= 1 if the region is humongous.
 433   uint region_num() const {
 434     if (!isHumongous()) {
 435       return 1U;
 436     } else {
 437       assert(startsHumongous(), "doesn't make sense on HC regions");
 438       assert(capacity() % HeapRegion::GrainBytes == 0, "sanity");
 439       return (uint) (capacity() >> HeapRegion::LogOfHRGrainBytes);
 440     }
 441   }
 442 
 443   // Return the index + 1 of the last HC region that's associated
 444   // with this HS region.


 702   // Sets the "evacuation_failed" property of the region.
 703   void set_evacuation_failed(bool b) {
 704     _evacuation_failed = b;
 705 
 706     if (b) {
 707       _next_marked_bytes = 0;
 708     }
 709   }
 710 
 711   // Requires that "mr" be entirely within the region.
 712   // Apply "cl->do_object" to all objects that intersect with "mr".
 713   // If the iteration encounters an unparseable portion of the region,
 714   // or if "cl->abort()" is true after a closure application,
 715   // terminate the iteration and return the address of the start of the
 716   // subregion that isn't done.  (The two can be distinguished by querying
 717   // "cl->abort()".)  Return of "NULL" indicates that the iteration
 718   // completed.
 719   HeapWord*
 720   object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
 721 
 722   // Iterate over the objects overlapping part of a card, applying cl
 723   // to all references in the region.  This is a helper for
 724   // G1RemSet::refine_card, and is tightly coupled with it.
 725   // mr: the memory region covered by the card, trimmed to the
 726   // allocated space for this region.  Must not be empty.
 727   // This region must be old or humongous.
 728   // Returns true if the designated objects were successfully
 729   // processed, false if an unparsable part of the heap was
 730   // encountered; that only happens when invoked concurrently with the
 731   // mutator.
 732   bool oops_on_card_seq_iterate_careful(MemRegion mr,
 733                                         FilterOutOfRegionClosure* cl);
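
Under the new contract, card cleaning and the memory ordering it requires move out to the caller (per the comment above, G1RemSet::refine_card). The following is a rough, hypothetical caller sketch, not the actual refine_card code: how 'mr' is computed and trimmed is elided, the function name and local names are assumptions, and only the ordering idea of the changeset (clean the card, fence, then scan) is illustrated.

  // Hypothetical caller sketch; not the actual G1RemSet::refine_card.
  // 'r' is the HeapRegion containing the card, 'mr' the card's MemRegion
  // already trimmed to the region's allocated space, 'filter_cl' a
  // FilterOutOfRegionClosure set up by the caller.
  bool refine_card_sketch(jbyte* card_ptr, HeapRegion* r, MemRegion mr,
                          FilterOutOfRegionClosure* filter_cl) {
    if (!r->is_old_or_humongous()) {
      return true;                                    // filtered out, nothing to scan
    }
    *card_ptr = CardTableModRefBS::clean_card_val();  // caller cleans the card
    OrderAccess::storeload();                         // order the clean before rescanning
    if (mr.is_empty()) {
      return true;                                    // nothing allocated under this card yet
    }
    // A false result means an unparsable part of the heap was hit (only
    // possible concurrently with the mutator); the card must then be
    // treated as still pending and revisited later.
    return r->oops_on_card_seq_iterate_careful(mr, filter_cl);
  }
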

 734 
 735   size_t recorded_rs_length() const        { return _recorded_rs_length; }
 736   double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
 737   size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }
 738 
 739   void set_recorded_rs_length(size_t rs_length) {
 740     _recorded_rs_length = rs_length;
 741   }
 742 
 743   void set_predicted_elapsed_time_ms(double ms) {
 744     _predicted_elapsed_time_ms = ms;
 745   }
 746 
 747   void set_predicted_bytes_to_copy(size_t bytes) {
 748     _predicted_bytes_to_copy = bytes;
 749   }
 750 
 751   virtual CompactibleSpace* next_compaction_space() const;
 752 
 753   virtual void reset_after_compaction();

