src/share/vm/gc/g1/heapRegion.hpp

rev 10742 : Make fields used in lock-free algorithms volatile


 677   // Requires that "mr" be entirely within the region.
 678   // Apply "cl->do_object" to all objects that intersect with "mr".
 679   // If the iteration encounters an unparseable portion of the region,
 680   // or if "cl->abort()" is true after a closure application,
 681   // terminate the iteration and return the address of the start of the
 682   // subregion that isn't done.  (The two can be distinguished by querying
 683   // "cl->abort()".)  Return of "NULL" indicates that the iteration
 684   // completed.
 685   HeapWord*
 686   object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
 687 
 688   // filter_young: if true and the region is a young region then we
 689   // skip the iteration.
 690   // card_ptr: if not NULL, and we decide that the card is not young
 691   // and we iterate over it, we'll clean the card before we start the
 692   // iteration.
 693   HeapWord*
 694   oops_on_card_seq_iterate_careful(MemRegion mr,
 695                                    FilterOutOfRegionClosure* cl,
 696                                    bool filter_young,
 697                                    jbyte* card_ptr);
 698 
 699   size_t recorded_rs_length() const        { return _recorded_rs_length; }
 700   double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
 701   size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }
 702 
 703   void set_recorded_rs_length(size_t rs_length) {
 704     _recorded_rs_length = rs_length;
 705   }
 706 
 707   void set_predicted_elapsed_time_ms(double ms) {
 708     _predicted_elapsed_time_ms = ms;
 709   }
 710 
 711   void set_predicted_bytes_to_copy(size_t bytes) {
 712     _predicted_bytes_to_copy = bytes;
 713   }
 714 
 715   virtual CompactibleSpace* next_compaction_space() const;
 716 
 717   virtual void reset_after_compaction();




 677   // Requires that "mr" be entirely within the region.
 678   // Apply "cl->do_object" to all objects that intersect with "mr".
 679   // If the iteration encounters an unparseable portion of the region,
 680   // or if "cl->abort()" is true after a closure application,
 681   // terminate the iteration and return the address of the start of the
 682   // subregion that isn't done.  (The two can be distinguished by querying
 683   // "cl->abort()".)  Return of "NULL" indicates that the iteration
 684   // completed.
 685   HeapWord*
 686   object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
 687 
 688   // filter_young: if true and the region is a young region then we
 689   // skip the iteration.
 690   // card_ptr: if not NULL, and we decide that the card is not young
 691   // and we iterate over it, we'll clean the card before we start the
 692   // iteration.
 693   HeapWord*
 694   oops_on_card_seq_iterate_careful(MemRegion mr,
 695                                    FilterOutOfRegionClosure* cl,
 696                                    bool filter_young,
 697                                    volatile jbyte* card_ptr);
 698 
 699   size_t recorded_rs_length() const        { return _recorded_rs_length; }
 700   double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
 701   size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }
 702 
 703   void set_recorded_rs_length(size_t rs_length) {
 704     _recorded_rs_length = rs_length;
 705   }
 706 
 707   void set_predicted_elapsed_time_ms(double ms) {
 708     _predicted_elapsed_time_ms = ms;
 709   }
 710 
 711   void set_predicted_bytes_to_copy(size_t bytes) {
 712     _predicted_bytes_to_copy = bytes;
 713   }
 714 
 715   virtual CompactibleSpace* next_compaction_space() const;
 716 
 717   virtual void reset_after_compaction();
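
The only change in this hunk is card_ptr becoming a volatile jbyte*, in line with the changeset comment about fields used in lock-free algorithms. As a minimal standalone sketch (not HotSpot code), the following illustrates the clean-before-iterate pattern the comment above describes and why the card byte is accessed through a volatile pointer: the card table entry can be written concurrently by mutator write barriers, so the compiler must not cache or elide reads and writes through it. The card values, function name, and claim logic here are hypothetical and only stand in for G1's actual card handling.

  #include <cstdio>

  typedef signed char jbyte;          // matches the width of HotSpot's jbyte

  static const jbyte dirty_card = 0;  // hypothetical card values for illustration
  static const jbyte clean_card = -1;

  // Clean the card before starting the iteration, as the comment block above
  // states; the read and write go through a volatile pointer so a concurrent
  // dirtying by another thread is not optimized away.
  bool claim_and_clean_card(volatile jbyte* card_ptr) {
    if (*card_ptr == clean_card) {
      return false;                   // already processed, skip the iteration
    }
    *card_ptr = clean_card;           // clean it before scanning oops on the card
    return true;
  }

  int main() {
    jbyte card = dirty_card;
    if (claim_and_clean_card(&card)) {
      printf("card claimed; would now iterate oops on the card\n");
    }
    return 0;
  }

In the real method the caller passes a pointer into the card table and, per the comment, the card is only cleaned when it is not young and the iteration actually proceeds; the sketch above omits those checks.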

