
src/hotspot/share/gc/g1/heapRegion.hpp

rev 57223 : imported patch 8225484-changes-to-survivor-calculation

--- old/src/hotspot/share/gc/g1/heapRegion.hpp

 227   // word until the top and/or end of the region, and is the part
 228   // of the region for which no marking was done, i.e., objects may
 229   // have been allocated in this part since the last mark phase.
 230   // "prev" is the top at the start of the last completed marking.
 231   // "next" is the top at the start of the in-progress marking (if any).
 232   HeapWord* _prev_top_at_mark_start;
 233   HeapWord* _next_top_at_mark_start;
 234 
 235   // We use concurrent marking to determine the amount of live data
 236   // in each heap region.
 237   size_t _prev_marked_bytes;    // Bytes known to be live via last completed marking.
 238   size_t _next_marked_bytes;    // Bytes known to be live via in-progress marking.
 239 
 240   void init_top_at_mark_start() {
 241     assert(_prev_marked_bytes == 0 &&
 242            _next_marked_bytes == 0,
 243            "Must be called after zero_marked_bytes.");
 244     _prev_top_at_mark_start = _next_top_at_mark_start = bottom();
 245   }
 246 
 247   // Data for young region survivor prediction.
 248   uint  _young_index_in_cset;
 249   G1SurvRateGroup* _surv_rate_group;
 250   int  _age_index;
 251 
 252   // Cached attributes used by the collection set policy.
 253 
 254   // The calculated GC efficiency of the region.
 255   double _gc_efficiency;
 256 
 257   uint _node_index;
 258 
 259   void report_region_type_change(G1HeapRegionTraceType::Type to);
 260 
 261   // Returns whether the given object address refers to a dead object, and sets "size"
 262   // to either the size of the object (if live) or the size of the block (if dead).
 263   // May
 264   // - only be called with obj < top()
 265   // - not be called on humongous objects or archive regions
 266   inline bool is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const;


 397   bool is_starts_humongous() const { return _type.is_starts_humongous(); }
 398   bool is_continues_humongous() const { return _type.is_continues_humongous(); }
 399 
 400   bool is_old() const { return _type.is_old(); }
 401 
 402   bool is_old_or_humongous() const { return _type.is_old_or_humongous(); }
 403 
 404   bool is_old_or_humongous_or_archive() const { return _type.is_old_or_humongous_or_archive(); }
 405 
 406   // A pinned region contains objects which are not moved by garbage collections.
 407   // Humongous regions and archive regions are pinned.
 408   bool is_pinned() const { return _type.is_pinned(); }
 409 
 410   // An archive region is a pinned region, also tagged as old, which
 411   // should not be marked during mark/sweep. This allows the address
 412   // space to be shared by JVM instances.
 413   bool is_archive()        const { return _type.is_archive(); }
 414   bool is_open_archive()   const { return _type.is_open_archive(); }
 415   bool is_closed_archive() const { return _type.is_closed_archive(); }
 416 
 417   void set_free();
 418 
 419   void set_eden();
 420   void set_eden_pre_gc();
 421   void set_survivor();
 422 
 423   void move_to_old();
 424   void set_old();
 425 
 426   void set_open_archive();
 427   void set_closed_archive();
 428 
 429   // For a humongous region, the region in which it starts.
 430   HeapRegion* humongous_start_region() const {
 431     return _humongous_start_region;
 432   }
 433 
 434   // Makes the current region a "starts humongous" region, i.e.,
 435   // the first region in a series of one or more contiguous regions
 436   // that will contain a single "humongous" object.


 516   // Notify the region that we have finished processing self-forwarded
 517   // objects during evac failure handling.
 518   void note_self_forwarding_removal_end(size_t marked_bytes);
 519 
 520   uint index_in_opt_cset() const {
 521     assert(has_index_in_opt_cset(), "Opt cset index not set.");
 522     return _index_in_opt_cset;
 523   }
 524   bool has_index_in_opt_cset() const { return _index_in_opt_cset != InvalidCSetIndex; }
 525   void set_index_in_opt_cset(uint index) { _index_in_opt_cset = index; }
 526   void clear_index_in_opt_cset() { _index_in_opt_cset = InvalidCSetIndex; }
 527 
 528   void calc_gc_efficiency(void);
 529   double gc_efficiency() const { return _gc_efficiency; }
 530 
 531   uint  young_index_in_cset() const { return _young_index_in_cset; }
 532   void clear_young_index_in_cset() { _young_index_in_cset = 0; }
 533   void set_young_index_in_cset(uint index) {
 534     assert(index != UINT_MAX, "just checking");
 535     assert(index != 0, "just checking");
 536     assert(is_young(), "pre-condition");
 537     _young_index_in_cset = index;
 538   }
 539 
 540   int age_in_surv_rate_group() const;
 541   bool has_valid_age_in_surv_rate() const;
 542 
 543   bool has_surv_rate_group() const;
 544 
 545   double surv_rate_prediction(G1Predictions const& predictor) const;
 546 
 547   void install_surv_rate_group(G1SurvRateGroup* surv_rate_group);
 548   void uninstall_surv_rate_group();
 549 
 550   void record_surv_words_in_group(size_t words_survived);
 551 
 552   // Determine if an object has been allocated since the last
 553   // mark performed by the collector. This returns true iff the object
 554   // is within the unmarked area of the region.
 555   bool obj_allocated_since_prev_marking(oop obj) const {
 556     return (HeapWord *) obj >= prev_top_at_mark_start();

+++ new/src/hotspot/share/gc/g1/heapRegion.hpp

 227   // word until the top and/or end of the region, and is the part
 228   // of the region for which no marking was done, i.e., objects may
 229   // have been allocated in this part since the last mark phase.
 230   // "prev" is the top at the start of the last completed marking.
 231   // "next" is the top at the start of the in-progress marking (if any).
 232   HeapWord* _prev_top_at_mark_start;
 233   HeapWord* _next_top_at_mark_start;
 234 
 235   // We use concurrent marking to determine the amount of live data
 236   // in each heap region.
 237   size_t _prev_marked_bytes;    // Bytes known to be live via last completed marking.
 238   size_t _next_marked_bytes;    // Bytes known to be live via in-progress marking.
 239 
 240   void init_top_at_mark_start() {
 241     assert(_prev_marked_bytes == 0 &&
 242            _next_marked_bytes == 0,
 243            "Must be called after zero_marked_bytes.");
 244     _prev_top_at_mark_start = _next_top_at_mark_start = bottom();
 245   }
 246 
 247   size_t _survivor_bytes;       // Bytes in this region that survived the previous GC.
 248 
 249   // Data for young region survivor prediction.
 250   uint  _young_index_in_cset;
 251   G1SurvRateGroup* _surv_rate_group;
 252   int  _age_index;
 253 
 254   // Cached attributes used by the collection set policy.
 255 
 256   // The calculated GC efficiency of the region.
 257   double _gc_efficiency;
 258 
 259   uint _node_index;
 260 
 261   void report_region_type_change(G1HeapRegionTraceType::Type to);
 262 
 263   // Returns whether the given object address refers to a dead object, and sets "size"
 264   // to either the size of the object (if live) or the size of the block (if dead).
 265   // May
 266   // - only be called with obj < top()
 267   // - not be called on humongous objects or archive regions
 268   inline bool is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const;
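
The prev-TAMS field and is_obj_dead_with_size() above encode G1's core liveness rule: anything allocated at or above prev TAMS was never visited by marking and is implicitly live, while anything below it is live only if the previous marking's bitmap marked it. A minimal, self-contained C++ sketch of that rule, with simplified stand-in types (none of these names are from the patch):

  #include <cstddef>

  // Stand-in for G1CMBitMap: answers whether the last completed marking
  // marked the given address. Stubbed out for illustration only.
  struct PrevBitmapSketch {
    bool is_marked(const char* addr) const { (void)addr; return false; }
  };

  struct RegionLivenessSketch {
    char* _prev_top_at_mark_start;  // top when the last completed marking began

    // Objects at or above prev TAMS were allocated after marking began, so
    // the bitmap carries no information about them: treat them as live.
    bool obj_allocated_since_prev_marking(const char* obj) const {
      return obj >= _prev_top_at_mark_start;
    }

    // Dead iff below prev TAMS and not marked by the previous marking.
    bool is_obj_dead(const char* obj, const PrevBitmapSketch& bitmap) const {
      return !obj_allocated_since_prev_marking(obj) && !bitmap.is_marked(obj);
    }
  };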


 399   bool is_starts_humongous() const { return _type.is_starts_humongous(); }
 400   bool is_continues_humongous() const { return _type.is_continues_humongous(); }
 401 
 402   bool is_old() const { return _type.is_old(); }
 403 
 404   bool is_old_or_humongous() const { return _type.is_old_or_humongous(); }
 405 
 406   bool is_old_or_humongous_or_archive() const { return _type.is_old_or_humongous_or_archive(); }
 407 
 408   // A pinned region contains objects which are not moved by garbage collections.
 409   // Humongous regions and archive regions are pinned.
 410   bool is_pinned() const { return _type.is_pinned(); }
 411 
 412   // An archive region is a pinned region, also tagged as old, which
 413   // should not be marked during mark/sweep. This allows the address
 414   // space to be shared by JVM instances.
 415   bool is_archive()        const { return _type.is_archive(); }
 416   bool is_open_archive()   const { return _type.is_open_archive(); }
 417   bool is_closed_archive() const { return _type.is_closed_archive(); }
 418 
 419   void set_survivor_bytes(size_t bytes) {
 420     assert(is_survivor(), "pre-condition");
 421     assert(bytes <= used(), "Survivor bytes " SIZE_FORMAT " larger than used() " SIZE_FORMAT, bytes, used());
 422     _survivor_bytes = bytes;
 423   }
 424 
 425   size_t survivor_bytes() const { return _survivor_bytes; }
 426 
 427   void set_free();
 428 
 429   void set_eden();
 430   void set_eden_pre_gc();
 431   void set_survivor();
 432 
 433   void move_to_old();
 434   void set_old();
 435 
 436   void set_open_archive();
 437   void set_closed_archive();
 438 
 439   // For a humongous region, the region in which it starts.
 440   HeapRegion* humongous_start_region() const {
 441     return _humongous_start_region;
 442   }
 443 
 444   // Makes the current region a "starts humongous" region, i.e.,
 445   // the first region in a series of one or more contiguous regions
 446   // that will contain a single "humongous" object.
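
The set_survivor_bytes()/survivor_bytes() pair added earlier in this hunk is the substance of the patch: it records, bounded by used(), how many bytes the previous GC copied into this survivor region. A hedged sketch of the bookkeeping and of one way a policy might consume it (the survived_fraction() helper is hypothetical, not part of the patch):

  #include <cassert>
  #include <cstddef>

  // Illustrative stand-in for the survivor-bytes bookkeeping added here.
  struct SurvivorBytesSketch {
    size_t _used;            // bytes currently in use in the region
    size_t _survivor_bytes;  // bytes that survived the previous GC

    size_t used() const { return _used; }

    void set_survivor_bytes(size_t bytes) {
      // Mirrors the patch's assert: survived bytes cannot exceed used bytes.
      assert(bytes <= used() && "survivor bytes larger than used()");
      _survivor_bytes = bytes;
    }
    size_t survivor_bytes() const { return _survivor_bytes; }
  };

  // Hypothetical consumer: the fraction of the region's used space that
  // survived the last GC, a natural input to survivor-rate prediction.
  inline double survived_fraction(const SurvivorBytesSketch& r) {
    return r.used() == 0 ? 0.0 : double(r.survivor_bytes()) / double(r.used());
  }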


 526   // Notify the region that we have finished processing self-forwarded
 527   // objects during evac failure handling.
 528   void note_self_forwarding_removal_end(size_t marked_bytes);
 529 
 530   uint index_in_opt_cset() const {
 531     assert(has_index_in_opt_cset(), "Opt cset index not set.");
 532     return _index_in_opt_cset;
 533   }
 534   bool has_index_in_opt_cset() const { return _index_in_opt_cset != InvalidCSetIndex; }
 535   void set_index_in_opt_cset(uint index) { _index_in_opt_cset = index; }
 536   void clear_index_in_opt_cset() { _index_in_opt_cset = InvalidCSetIndex; }
 537 
 538   void calc_gc_efficiency(void);
 539   double gc_efficiency() const { return _gc_efficiency; }
 540 
 541   uint young_index_in_cset() const { return _young_index_in_cset; }
 542   void clear_young_index_in_cset() { _young_index_in_cset = 0; }
 543   void set_young_index_in_cset(uint index) {
 544     assert(index != UINT_MAX, "just checking");
 545     assert(index != 0, "just checking");
 546     assert(is_eden(), "pre-condition");
 547     _young_index_in_cset = index;
 548   }
 549 
 550   int age_in_surv_rate_group() const;
 551   bool has_valid_age_in_surv_rate() const;
 552 
 553   bool has_surv_rate_group() const;
 554 
 555   double surv_rate_prediction(G1Predictions const& predictor) const;
 556 
 557   void install_surv_rate_group(G1SurvRateGroup* surv_rate_group);
 558   void uninstall_surv_rate_group();
 559 
 560   void record_surv_words_in_group(size_t words_survived);
 561 
 562   // Determine if an object has been allocated since the last
 563   // mark performed by the collector. This returns true iff the object
 564   // is within the unmarked area of the region.
 565   bool obj_allocated_since_prev_marking(oop obj) const {
 566     return (HeapWord *) obj >= prev_top_at_mark_start();
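
install_surv_rate_group(), age_in_surv_rate_group() and surv_rate_prediction() tie each young region to a per-age table of observed survival rates. The sketch below shows how such a table can yield a per-region prediction; the names are illustrative, and the real G1SurvRateGroup additionally smooths its rates through G1Predictions rather than using raw observations:

  #include <cstddef>
  #include <vector>

  // Illustrative per-age survival-rate table, keyed by a region's age index.
  struct SurvRateTableSketch {
    std::vector<double> _rate_by_age;  // observed surviving fraction per age

    double rate_for(int age_index) const {
      if (age_index < 0 || size_t(age_index) >= _rate_by_age.size()) {
        return 1.0;  // no history for this age: assume everything survives
      }
      return _rate_by_age[size_t(age_index)];
    }
  };

  // Predicted bytes surviving a collection of a young region: its used
  // bytes scaled by the rate recorded for regions of the same age.
  inline size_t predicted_surviving_bytes(const SurvRateTableSketch& table,
                                          int age_index, size_t used_bytes) {
    return size_t(table.rate_for(age_index) * double(used_bytes));
  }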

