/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_HEAPREGION_INLINE_HPP
#define SHARE_GC_G1_HEAPREGION_INLINE_HPP

#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
#include "gc/g1/g1Predictions.hpp"
#include "gc/g1/heapRegion.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"

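// Allocate at most desired_word_size words from this region, but no fewer
// than min_word_size; return NULL if even min_word_size words are not
// available between top() and end(). The number of words actually allocated
// is reported through actual_size. For example, with min_word_size = 2,
// desired_word_size = 10 and only 5 words available, the request is trimmed
// to 5 words, *actual_size is set to 5, and the old top() is returned.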
inline HeapWord* HeapRegion::allocate_impl(size_t min_word_size,
                                           size_t desired_word_size,
                                           size_t* actual_size) {
  HeapWord* obj = top();
  size_t available = pointer_delta(end(), obj);
  size_t want_to_allocate = MIN2(available, desired_word_size);
  if (want_to_allocate >= min_word_size) {
    HeapWord* new_top = obj + want_to_allocate;
    set_top(new_top);
    assert(is_object_aligned(obj) && is_object_aligned(new_top), "checking alignment");
    *actual_size = want_to_allocate;
    return obj;
  } else {
    return NULL;
  }
}

inline HeapWord* HeapRegion::par_allocate_impl(size_t min_word_size,
                                               size_t desired_word_size,
                                               size_t* actual_size) {
  do {
    HeapWord* obj = top();
    size_t available = pointer_delta(end(), obj);
    size_t want_to_allocate = MIN2(available, desired_word_size);
    if (want_to_allocate >= min_word_size) {
      HeapWord* new_top = obj + want_to_allocate;
      HeapWord* result = Atomic::cmpxchg(new_top, &_top, obj);
      // result can be one of two values:
      //  the old top value (obj): the exchange succeeded
      //  otherwise: the exchange failed; result is the current value of top,
      //  installed by a racing thread, and we retry.
      if (result == obj) {
        assert(is_object_aligned(obj) && is_object_aligned(new_top), "checking alignment");
        *actual_size = want_to_allocate;
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

inline HeapWord* HeapRegion::allocate(size_t min_word_size,
                                      size_t desired_word_size,
                                      size_t* actual_size) {
  HeapWord* res = allocate_impl(min_word_size, desired_word_size, actual_size);
  if (res != NULL) {
    _bot_part.alloc_block(res, *actual_size);
  }
  return res;
}

inline HeapWord* HeapRegion::allocate(size_t word_size) {
  size_t temp;
  return allocate(word_size, word_size, &temp);
}

inline HeapWord* HeapRegion::par_allocate(size_t word_size) {
  size_t temp;
  return par_allocate(word_size, word_size, &temp);
}

// Because the block offset table (_bot_part) must be kept up to date with
// allocations, we serialize them with a lock. This is therefore best used
// for larger LAB allocations only, where the locking overhead is amortized.
inline HeapWord* HeapRegion::par_allocate(size_t min_word_size,
                                          size_t desired_word_size,
                                          size_t* actual_size) {
  MutexLocker x(&_par_alloc_lock);
  return allocate(min_word_size, desired_word_size, actual_size);
}

inline HeapWord* HeapRegion::block_start(const void* p) {
  return _bot_part.block_start(p);
}

inline HeapWord* HeapRegion::block_start_const(const void* p) const {
  return _bot_part.block_start_const(p);
}

inline bool HeapRegion::is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const {
  HeapWord* addr = (HeapWord*) obj;

  assert(addr < top(), "must be");
  assert(!is_closed_archive(),
         "Closed archive regions should not have references into other regions");
  assert(!is_humongous(), "Humongous objects not handled here");
  bool obj_is_dead = is_obj_dead(obj, prev_bitmap);

  if (ClassUnloadingWithConcurrentMark && obj_is_dead) {
    assert(!block_is_obj(addr), "must be");
    *size = block_size_using_bitmap(addr, prev_bitmap);
  } else {
    assert(block_is_obj(addr), "must be");
    *size = obj->size();
  }
  return obj_is_dead;
}

inline bool HeapRegion::block_is_obj(const HeapWord* p) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  if (!this->is_in(p)) {
    assert(is_continues_humongous(), "This case can only happen for humongous regions");
    return (p == humongous_start_region()->bottom());
  }
  if (ClassUnloadingWithConcurrentMark) {
    return !g1h->is_obj_dead(oop(p), this);
  }
  return p < top();
}

inline size_t HeapRegion::block_size_using_bitmap(const HeapWord* addr, const G1CMBitMap* const prev_bitmap) const {
  assert(ClassUnloadingWithConcurrentMark,
         "All blocks should be objects if class unloading isn't used, so this method should not be called. "
         "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
         "addr: " PTR_FORMAT,
         p2i(bottom()), p2i(top()), p2i(end()), p2i(addr));

  // Old regions' dead objects may have dead classes.
  // We need to find the next live object using the bitmap.
  HeapWord* next = prev_bitmap->get_next_marked_addr(addr, prev_top_at_mark_start());

  assert(next > addr, "must get the next live object");
  return pointer_delta(next, addr);
}

inline bool HeapRegion::is_obj_dead(const oop obj, const G1CMBitMap* const prev_bitmap) const {
  assert(is_in_reserved(obj), "Object " PTR_FORMAT " must be in region", p2i(obj));
  return !obj_allocated_since_prev_marking(obj) &&
         !prev_bitmap->is_marked((HeapWord*)obj) &&
         !is_open_archive();
}

inline size_t HeapRegion::block_size(const HeapWord *addr) const {
  if (addr == top()) {
    return pointer_delta(end(), addr);
  }

  if (block_is_obj(addr)) {
    return oop(addr)->size();
  }

  return block_size_using_bitmap(addr, G1CollectedHeap::heap()->concurrent_mark()->prev_mark_bitmap());
}

inline void HeapRegion::complete_compaction() {
  // Reset space and bot after compaction is complete if needed.
  reset_after_compaction();
  if (is_empty()) {
    reset_bot();
  }

  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();

  // Clear unused heap memory in debug builds.
  if (ZapUnusedHeapArea) {
    mangle_unused_area();
  }
}

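// Iterate over the objects marked in the given bitmap between bottom() and
// top(). ApplyToMarkedClosure is a duck-typed template parameter: it must
// provide an apply(oop) method that processes the object and returns its
// size in HeapWords, which the loop uses to advance the scan. A minimal
// sketch of a conforming closure (hypothetical, for illustration only):
//
//   class CountLiveWordsClosure {
//     size_t _live_words;
//   public:
//     CountLiveWordsClosure() : _live_words(0) { }
//     size_t apply(oop obj) {
//       size_t size_in_words = obj->size(); // object size in HeapWords
//       _live_words += size_in_words;
//       return size_in_words;               // scan advances by this amount
//     }
//     size_t live_words() const { return _live_words; }
//   };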
template<typename ApplyToMarkedClosure>
inline void HeapRegion::apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure) {
  HeapWord* limit = top();
  HeapWord* next_addr = bottom();

  while (next_addr < limit) {
    Prefetch::write(next_addr, PrefetchScanIntervalInBytes);
    // This explicit is_marked check is a way to avoid
    // some extra work done by get_next_marked_addr for
    // the case where next_addr is marked.
    if (bitmap->is_marked(next_addr)) {
      oop current = oop(next_addr);
      next_addr += closure->apply(current);
    } else {
      next_addr = bitmap->get_next_marked_addr(next_addr, limit);
    }
  }

  assert(next_addr == limit, "Should stop the scan at the limit.");
}

inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size,
                                                         size_t desired_word_size,
                                                         size_t* actual_word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return par_allocate_impl(min_word_size, desired_word_size, actual_word_size);
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
  size_t temp;
  return allocate_no_bot_updates(word_size, word_size, &temp);
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t min_word_size,
                                                     size_t desired_word_size,
                                                     size_t* actual_word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return allocate_impl(min_word_size, desired_word_size, actual_word_size);
}

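// Concurrent marking bookkeeping: when a marking cycle starts we snapshot
// top() as the "next" top-at-mark-start (TAMS); objects allocated at or
// above TAMS during marking are considered implicitly live. When marking
// finishes, the "next" values become the "prev" values that is_obj_dead()
// and friends consult.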
inline void HeapRegion::note_start_of_marking() {
  _next_marked_bytes = 0;
  _next_top_at_mark_start = top();
}

inline void HeapRegion::note_end_of_marking() {
  _prev_top_at_mark_start = _next_top_at_mark_start;
  _next_top_at_mark_start = bottom();
  _prev_marked_bytes = _next_marked_bytes;
  _next_marked_bytes = 0;
}

inline bool HeapRegion::in_collection_set() const {
  return G1CollectedHeap::heap()->is_in_cset(this);
}

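// The two memregion iterators below accept any oop-visiting closure that
// obj->oop_iterate() can be called with, i.e. one derived from
// OopIterateClosure (see memory/iterator.hpp). A minimal sketch of such a
// closure (hypothetical, for illustration only):
//
//   class CountReferencesClosure : public BasicOopIterateClosure {
//     size_t _count;
//   public:
//     CountReferencesClosure() : _count(0) { }
//     virtual void do_oop(oop* p)       { _count++; }
//     virtual void do_oop(narrowOop* p) { _count++; }
//     size_t count() const { return _count; }
//   };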
template <class Closure, bool is_gc_active>
HeapWord* HeapRegion::do_oops_on_memregion_in_humongous(MemRegion mr,
                                                        Closure* cl,
                                                        G1CollectedHeap* g1h) {
  assert(is_humongous(), "precondition");
  HeapRegion* sr = humongous_start_region();
  oop obj = oop(sr->bottom());

  // If concurrent and klass_or_null is NULL, then space has been
  // allocated but the object has not yet been published by setting
  // the klass.  That can only happen if the card is stale.  However,
  // we've already set the card clean, so we must return failure,
  // since the allocating thread could have performed a write to the
  // card that might be missed otherwise.
  if (!is_gc_active && (obj->klass_or_null_acquire() == NULL)) {
    return NULL;
  }

  // We have a well-formed humongous object at the start of sr.
  // Only filler objects follow a humongous object in the containing
  // regions, and we can ignore those.  So only process the one
  // humongous object.
  if (g1h->is_obj_dead(obj, sr)) {
    // The object is dead. There can be no other object in this region, so return
    // the end of that region.
    return end();
  }
  if (obj->is_objArray() || (sr->bottom() < mr.start())) {
    // objArrays are always marked precisely, so limit processing
    // with mr.  Non-objArrays might be precisely marked, and since
    // it's humongous it's worthwhile avoiding full processing.
    // However, the card could be stale and only cover filler
    // objects.  That should be rare, so not worth checking for;
    // instead let it fall out from the bounded iteration.
    obj->oop_iterate(cl, mr);
    return mr.end();
  } else {
    // If obj is not an objArray and mr contains the start of the
    // obj, then this could be an imprecise mark, and we need to
    // process the entire object.
    size_t size = obj->oop_iterate_size(cl);
    // We have scanned to the end of the object, but since there can be no objects
    // after this humongous object in the region, we can return the end of the
    // region if it is greater.
    return MAX2((HeapWord*)obj + size, mr.end());
  }
}

template <bool is_gc_active, class Closure>
HeapWord* HeapRegion::oops_on_memregion_seq_iterate_careful(MemRegion mr,
                                                            Closure* cl) {
  assert(MemRegion(bottom(), end()).contains(mr), "Card region not in heap region");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Special handling for humongous regions.
  if (is_humongous()) {
    return do_oops_on_memregion_in_humongous<Closure, is_gc_active>(mr, cl, g1h);
  }
  assert(is_old() || is_archive(), "Wrongly trying to iterate over region %u type %s", _hrm_index, get_type_str());

  // Because mr has been trimmed to what's been allocated in this
  // region, the parts of the heap that are examined here are always
  // parsable; there's no need to use klass_or_null to detect
  // in-progress allocation.

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // Find the obj that extends onto mr.start().
  // Update BOT as needed while finding start of (possibly dead)
  // object containing the start of the region.
  HeapWord* cur = block_start(start);

#ifdef ASSERT
  {
    assert(cur <= start,
           "cur: " PTR_FORMAT ", start: " PTR_FORMAT, p2i(cur), p2i(start));
    HeapWord* next = cur + block_size(cur);
    assert(start < next,
           "start: " PTR_FORMAT ", next: " PTR_FORMAT, p2i(start), p2i(next));
  }
#endif

  const G1CMBitMap* const bitmap = g1h->concurrent_mark()->prev_mark_bitmap();
  while (true) {
    oop obj = oop(cur);
    assert(oopDesc::is_oop(obj, true), "Not an oop at " PTR_FORMAT, p2i(cur));
    assert(obj->klass_or_null() != NULL,
           "Unparsable heap at " PTR_FORMAT, p2i(cur));

    size_t size;
    bool is_dead = is_obj_dead_with_size(obj, bitmap, &size);
    bool is_precise = false;

    cur += size;
    if (!is_dead) {
      // Process live object's references.

      // Non-objArrays are usually marked imprecise at the object
      // start, in which case we need to iterate over them in full.
      // objArrays are precisely marked, but can still be iterated
      // over in full if completely covered.
      if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
        obj->oop_iterate(cl);
      } else {
        obj->oop_iterate(cl, mr);
        is_precise = true;
      }
    }
    if (cur >= end) {
      return is_precise ? end : cur;
    }
  }
}

inline int HeapRegion::age_in_surv_rate_group() const {
  assert(has_surv_rate_group(), "pre-condition");
  assert(has_valid_age_in_surv_rate(), "pre-condition");
  return _surv_rate_group->age_in_group(_age_index);
}

inline bool HeapRegion::has_valid_age_in_surv_rate() const {
  return SurvRateGroup::is_valid_age_index(_age_index);
}

inline bool HeapRegion::has_surv_rate_group() const {
  return _surv_rate_group != NULL;
}

inline double HeapRegion::surv_rate_prediction(G1Predictions const& predictor) const {
  assert(has_surv_rate_group(), "pre-condition");
  return _surv_rate_group->surv_rate_pred(predictor, age_in_surv_rate_group());
}

inline void HeapRegion::install_surv_rate_group(SurvRateGroup* surv_rate_group) {
  assert(surv_rate_group != NULL, "pre-condition");
  assert(!has_surv_rate_group(), "pre-condition");
  assert(is_young(), "pre-condition");

  _surv_rate_group = surv_rate_group;
  _age_index = surv_rate_group->next_age_index();
}

inline void HeapRegion::uninstall_surv_rate_group() {
  if (has_surv_rate_group()) {
    assert(has_valid_age_in_surv_rate(), "pre-condition");
    assert(is_young(), "pre-condition");

    _surv_rate_group = NULL;
    _age_index = SurvRateGroup::InvalidAgeIndex;
  } else {
    assert(!has_valid_age_in_surv_rate(), "pre-condition");
  }
}

inline void HeapRegion::record_surv_words_in_group(size_t words_survived) {
  assert(has_surv_rate_group(), "pre-condition");
  assert(has_valid_age_in_surv_rate(), "pre-condition");
  int age_in_group = age_in_surv_rate_group();
  _surv_rate_group->record_surviving_words(age_in_group, words_survived);
}

#endif // SHARE_GC_G1_HEAPREGION_INLINE_HPP