/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1CONCURRENTMARK_INLINE_HPP
#define SHARE_VM_GC_G1_G1CONCURRENTMARK_INLINE_HPP

#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"

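// Atomically marks the given object on the next mark bitmap. Returns true if
// this call set the bit, i.e. the object had not been marked before.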
inline bool G1ConcurrentMark::par_mark(oop obj) {
  return _nextMarkBitMap->parMark((HeapWord*)obj);
}

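// Applies the given closure to every set bit (i.e. every marked object start)
// in the intersection of the bitmap's coverage and mr. Aborts and returns
// false as soon as the closure returns false, otherwise returns true.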
inline bool G1CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
  HeapWord* start_addr = MAX2(startWord(), mr.start());
  HeapWord* end_addr = MIN2(endWord(), mr.end());

  if (end_addr > start_addr) {
    // Right-open interval [start-offset, end-offset).
    BitMap::idx_t start_offset = heapWordToOffset(start_addr);
    BitMap::idx_t end_offset = heapWordToOffset(end_addr);

    start_offset = _bm.get_next_one_offset(start_offset, end_offset);
    while (start_offset < end_offset) {
      if (!cl->do_bit(start_offset)) {
        return false;
      }
      HeapWord* next_addr = MIN2(nextObject(offsetToHeapWord(start_offset)), end_addr);
      BitMap::idx_t next_offset = heapWordToOffset(next_addr);
      start_offset = _bm.get_next_one_offset(next_offset, end_offset);
    }
  }
  return true;
}

// The argument addr should be the start address of a valid object
HeapWord* G1CMBitMapRO::nextObject(HeapWord* addr) {
  oop obj = (oop) addr;
  HeapWord* res = addr + obj->size();
  assert(offsetToHeapWord(heapWordToOffset(res)) == res, "sanity");
  return res;
}

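// Sanity checks that addr is covered by the bitmap and backed by committed
// heap memory before it is used to index the bitmap.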
#define check_mark(addr)                                                       \
  assert(_bmStartWord <= (addr) && (addr) < (_bmStartWord + _bmWordSize),      \
         "outside underlying space?");                                         \
  assert(G1CollectedHeap::heap()->is_in_exact(addr),                           \
         "Trying to access not available bitmap " PTR_FORMAT                   \
         " corresponding to " PTR_FORMAT " (%u)",                              \
         p2i(this), p2i(addr), G1CollectedHeap::heap()->addr_to_region(addr));

inline void G1CMBitMap::mark(HeapWord* addr) {
  check_mark(addr);
  _bm.set_bit(heapWordToOffset(addr));
}

inline void G1CMBitMap::clear(HeapWord* addr) {
  check_mark(addr);
  _bm.clear_bit(heapWordToOffset(addr));
}

inline bool G1CMBitMap::parMark(HeapWord* addr) {
  check_mark(addr);
  return _bm.par_set_bit(heapWordToOffset(addr));
}

#undef check_mark

#ifndef PRODUCT
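// Debug-only: applies fn to every entry of every chunk currently on the
// chunk list. Must be called at a safepoint.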
template<typename Fn>
inline void G1CMMarkStack::iterate(Fn fn) const {
  assert_at_safepoint(true);

  size_t num_chunks = 0;

  OopChunk* cur = _chunk_list;
  while (cur != NULL) {
    guarantee(num_chunks <= _chunks_in_chunk_list, "Found " SIZE_FORMAT " oop chunks which is more than there should be", num_chunks);

    for (size_t i = 0; i < EntriesPerChunk; ++i) {
      if (cur->data[i].is_null()) {
        break;
      }
      fn(cur->data[i]);
    }
    cur = cur->next;
    num_chunks++;
  }
}
#endif

// Scans the given object and visits the objects it references.
inline void G1CMTask::scan_object(G1TaskQueueEntry task_entry) { process_grey_object<true>(task_entry); }

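// Pushes the given entry onto the local task queue. If the local queue is
// full, moves part of its contents to the global mark stack and retries.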
inline void G1CMTask::push(G1TaskQueueEntry task_entry) {
  assert(task_entry.is_array_slice() || _g1h->is_in_g1_reserved(task_entry.obj()), "invariant");
  assert(task_entry.is_array_slice() || !_g1h->is_on_master_free_list(
              _g1h->heap_region_containing(task_entry.obj())), "invariant");
  assert(task_entry.is_array_slice() || !_g1h->is_obj_ill(task_entry.obj()), "invariant");  // FIXME!!!
  assert(task_entry.is_array_slice() || _nextMarkBitMap->isMarked((HeapWord*)task_entry.obj()), "invariant");

  if (!_task_queue->push(task_entry)) {
    // The local task queue looks full. We need to push some entries
    // to the global stack.
    move_entries_to_global_stack();

    // This push should succeed: even if we overflowed the global stack, we
    // definitely removed some entries from the local queue, so there must be
    // space on it now.
    bool success = _task_queue->push(task_entry);
    assert(success, "invariant");
  }
}

inline bool G1CMTask::is_below_finger(oop obj, HeapWord* global_finger) const {
  // If obj is above the global finger, then the mark bitmap scan
  // will find it later, and no push is needed.  Similarly, if we have
  // a current region and obj is between the local finger and the
  // end of the current region, then no push is needed.  The tradeoff
  // of checking both vs only checking the global finger is that the
  // local check will be more accurate and so result in fewer pushes,
  // but may also be a little slower.
  HeapWord* objAddr = (HeapWord*)obj;
  if (_finger != NULL) {
    // We have a current region.

    // Finger and region values are all NULL or all non-NULL.  We
    // use _finger to check since we immediately use its value.
    assert(_curr_region != NULL, "invariant");
    assert(_region_limit != NULL, "invariant");
    assert(_region_limit <= global_finger, "invariant");

    // True if obj is less than the local finger, or is between
    // the region limit and the global finger.
    if (objAddr < _finger) {
      return true;
    } else if (objAddr < _region_limit) {
      return false;
    } // Else check global finger.
  }
  // Check global finger.
  return objAddr < global_finger;
}

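// Processes a grey task entry: when scan is true, scans the object or array
// slice and adds the number of words scanned; checks the per-task limits in
// all cases afterwards.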
template<bool scan>
inline void G1CMTask::process_grey_object(G1TaskQueueEntry task_entry) {
  assert(scan || (task_entry.is_oop() && task_entry.obj()->is_typeArray()), "Skipping scan of grey non-typeArray");
  assert(task_entry.is_array_slice() || _nextMarkBitMap->isMarked((HeapWord*)task_entry.obj()),
         "Any stolen object should be a slice or marked");

  if (scan) {
    if (task_entry.is_array_slice()) {
      _words_scanned += _objArray_processor.process_slice(task_entry.slice());
    } else {
      oop obj = task_entry.obj();
      if (G1CMObjArrayProcessor::should_be_sliced(obj)) {
        _words_scanned += _objArray_processor.process_obj(obj);
      } else {
        _words_scanned += obj->oop_iterate_size(_cm_oop_closure);
      }
    }
  }
  check_limits();
}

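// Scans the part of the given objArray that lies within mr and returns the
// number of words covered by mr.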
inline size_t G1CMTask::scan_objArray(objArrayOop obj, MemRegion mr) {
  obj->oop_iterate(_cm_oop_closure, mr);
  return mr.word_size();
}

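// Marks the given object on the next bitmap if it has not been marked yet.
// If this call marked it and it lies below the relevant finger, the object is
// either processed immediately (typeArrays) or pushed onto the mark stack.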
inline void G1CMTask::make_reference_grey(oop obj) {
  if (_cm->par_mark(obj)) {
    // No OrderAccess::store_load() is needed. It is implicit in the
    // CAS done in G1CMBitMap::parMark() call in the routine above.
    HeapWord* global_finger = _cm->finger();

    // We only need to push a newly grey object on the mark
    // stack if it is in a section of memory the mark bitmap
    // scan has already examined.  Mark bitmap scanning
    // maintains progress "fingers" for determining that.
    //
    // Notice that the global finger might be moving forward
    // concurrently. This is not a problem. In the worst case, we
    // mark the object while it is above the global finger and, by
    // the time we read the global finger, it has moved forward
    // past this object. In this case, the object will probably
    // be visited when a task is scanning the region and will also
    // be pushed on the stack. So, some duplicate work, but no
    // correctness problems.
    if (is_below_finger(obj, global_finger)) {
      if (obj->is_typeArray()) {
        // Immediately process arrays of primitive types, rather
        // than pushing on the mark stack.  This keeps us from
        // adding humongous objects to the mark stack that might
        // be reclaimed before the entry is processed - see
        // selection of candidates for eager reclaim of humongous
        // objects.  The cost of the additional type test is
        // mitigated by avoiding a trip through the mark stack,
        // by only doing a bookkeeping update and avoiding the
        // actual scan of the object - a typeArray contains no
        // references, and the metadata is built-in.
        process_grey_object<false>(obj);
      } else {
        push(obj);
      }
    }
  }
}

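// Called for every reference discovered during marking. Greys the referenced
// object if it is inside the heap, not yet marked on the next bitmap, and was
// not allocated since the current marking cycle started.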
inline void G1CMTask::deal_with_reference(oop obj) {
  increment_refs_reached();

  HeapWord* objAddr = (HeapWord*) obj;
  assert(obj->is_oop_or_null(true /* ignore mark word */), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
  if (_g1h->is_in_g1_reserved(objAddr)) {
    assert(obj != NULL, "null check is implicit");
    if (!_nextMarkBitMap->isMarked(objAddr)) {
      // Only get the containing region if the object is not marked on the
      // bitmap (otherwise, it's a waste of time since we won't do
      // anything with it).
      HeapRegion* hr = _g1h->heap_region_containing(obj);
      if (!hr->obj_allocated_since_next_marking(obj)) {
        make_reference_grey(obj);
      }
    }
  }
}

inline void G1ConcurrentMark::markPrev(oop p) {
  assert(!_prevMarkBitMap->isMarked((HeapWord*) p), "sanity");
  // Note we are overriding the read-only view of the prev map here, via
  // the cast.
  ((G1CMBitMap*)_prevMarkBitMap)->mark((HeapWord*) p);
}

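// Returns whether the given object is marked on the previous mark bitmap.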
bool G1ConcurrentMark::isPrevMarked(oop p) const {
  assert(p != NULL && p->is_oop(), "expected an oop");
  HeapWord* addr = (HeapWord*)p;
  assert(addr >= _prevMarkBitMap->startWord() &&
         addr < _prevMarkBitMap->endWord(), "in a region");

  return _prevMarkBitMap->isMarked(addr);
}

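// Marks the given root object on the next bitmap if it lies below the
// containing region's next top-at-mark-start (objects above NTAMS are
// considered implicitly live and need no mark).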
inline void G1ConcurrentMark::grayRoot(oop obj, HeapRegion* hr) {
  assert(obj != NULL, "pre-condition");
  HeapWord* addr = (HeapWord*) obj;
  if (hr == NULL) {
    hr = _g1h->heap_region_containing(addr);
  } else {
    assert(hr->is_in(addr), "pre-condition");
  }
  assert(hr != NULL, "sanity");
  // Given that we're looking for a region that contains an object
  // header it's impossible to get back a HC region.
  assert(!hr->is_continues_humongous(), "sanity");

  if (addr < hr->next_top_at_mark_start()) {
    if (!_nextMarkBitMap->isMarked(addr)) {
      par_mark(obj);
    }
  }
}

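// Yields to the suspendible thread set if a yield has been requested.
// Returns true if this thread actually yielded.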
inline bool G1ConcurrentMark::do_yield_check() {
  if (SuspendibleThreadSet::should_yield()) {
    SuspendibleThreadSet::yield();
    return true;
  } else {
    return false;
  }
}

#endif // SHARE_VM_GC_G1_G1CONCURRENTMARK_INLINE_HPP