1 /*
   2  * Copyright (c) 2015, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP
  25 #define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP
  26 
  27 #include "classfile/javaClasses.inline.hpp"
  28 #include "gc/shenandoah/brooksPointer.hpp"
  29 #include "gc/shenandoah/shenandoahAsserts.hpp"
  30 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
  31 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  32 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  33 #include "memory/iterator.inline.hpp"
  34 #include "oops/oop.inline.hpp"
  35 #include "runtime/prefetch.inline.hpp"
  36 
  37 template <class T, bool COUNT_LIVENESS>
  38 void ShenandoahConcurrentMark::do_task(ShenandoahObjToScanQueue* q, T* cl, jushort* live_data, ShenandoahMarkTask* task) {
  39   oop obj = task->obj();
  40 
  41   shenandoah_assert_not_forwarded_except(NULL, obj, _heap->is_concurrent_traversal_in_progress() && _heap->cancelled_concgc());
  42   shenandoah_assert_marked_next(NULL, obj);
  43   shenandoah_assert_not_in_cset_except(NULL, obj, _heap->cancelled_concgc());
  44 
  45   if (task->is_not_chunked()) {
  46     if (COUNT_LIVENESS) count_liveness(live_data, obj);
  47     if (obj->is_instance()) {
  48       // Case 1: Normal oop, process as usual.
  49       obj->oop_iterate(cl);
  50     } else if (obj->is_objArray()) {
  51       // Case 2: Object array instance and no chunk is set. Must be the first
  52       // time we visit it, start the chunked processing.
  53       do_chunked_array_start<T>(q, cl, obj);
  54     } else {
  55       // Case 3: Primitive array. Do nothing, no oops there. We use the same
  56       // performance tweak TypeArrayKlass::oop_oop_iterate_impl is using:
  57       // We skip iterating over the klass pointer since we know that
  58       // Universe::TypeArrayKlass never moves.
  59       assert (obj->is_typeArray(), "should be type array");
  60     }
  61   } else {
  62     // Case 4: Array chunk, has sensible chunk id. Process it.
  63     do_chunked_array<T>(q, cl, obj, task->chunk(), task->pow());
  64   }
  65 }
  66 
  67 inline void ShenandoahConcurrentMark::count_liveness(jushort* live_data, oop obj) {
  68   size_t region_idx = _heap->heap_region_index_containing(obj);
  69   ShenandoahHeapRegion* region = _heap->get_region(region_idx);
  70   if (!region->is_humongous_start()) {
  71     assert(!region->is_humongous(), "Cannot have continuations here");
  72     jushort cur = live_data[region_idx];
  73     size_t size = obj->size() + BrooksPointer::word_size();
  74     size_t max = (1 << (sizeof(jushort) * 8)) - 1;
  75     if (size >= max) {
  76       // too big, add to region data directly
  77       region->increase_live_data_gc_words(size);
  78     } else {
  79       size_t new_val = cur + size;
  80       if (new_val >= max) {
  81         // overflow, flush to region data
  82         region->increase_live_data_gc_words(new_val);
  83         live_data[region_idx] = 0;
  84       } else {
  85         // still good, remember in locals
  86         live_data[region_idx] = (jushort) new_val;
  87       }
  88     }
  89   } else {
  90     count_liveness_humongous(obj);
  91   }
  92 }
  93 
  94 inline void ShenandoahConcurrentMark::count_liveness_humongous(oop obj) {
  95   shenandoah_assert_in_correct_region(NULL, obj);
  96   size_t region_idx = _heap->heap_region_index_containing(obj);
  97   int size = obj->size() + BrooksPointer::word_size();
  98   size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  99   for (size_t i = region_idx; i < region_idx + num_regions; i++) {
 100     ShenandoahHeapRegion* chain_reg = _heap->get_region(i);
 101     chain_reg->increase_live_data_gc_words(chain_reg->used() >> LogHeapWordSize);
 102   }
 103 }
 104 
// First visit of a large object array: set up chunked processing.
// A queued task (chunk C, power P) covers the index range
// [(C-1) * 2^P, C * 2^P) — see do_chunked_array. Only full chunks that lie
// entirely within the array are pushed; the irregular tail, if any, is
// scanned here directly.
template <class T>
inline void ShenandoahConcurrentMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj) {
  assert(obj->is_objArray(), "expect object array");
  objArrayOop array = objArrayOop(obj);
  int len = array->length();

  if (len <= (int) ObjArrayMarkingStride*2) {
    // A few slices only, process directly
    array->oop_iterate_range(cl, 0, len);
  } else {
    int bits = log2_long(len);
    // Compensate for non-power-of-two arrays, cover the array in excess:
    if (len != (1 << bits)) bits++;

    // Only allow full chunks on the queue. This frees do_chunked_array() from checking from/to
    // boundaries against array->length(), touching the array header on every chunk.
    //
    // To do this, we cut the prefix in full-sized chunks, and submit them on the queue.
    // If the array is not divided in chunk sizes, then there would be an irregular tail,
    // which we will process separately.

    // Right boundary of the last full chunk pushed or claimed so far.
    int last_idx = 0;

    // Root chunk (id 1 at power "bits") covers the whole rounded-up array.
    int chunk = 1;
    int pow = bits;

    // Handle overflow
    if (pow >= 31) {
      assert (pow == 31, "sanity");
      // 1 << 31 would overflow int. Split the root chunk once by hand:
      // push chunk 1 at pow 30 (covers [0, 2^30)), and continue subdividing
      // chunk 2 (the upper part) in the loop below.
      pow--;
      chunk = 2;
      last_idx = (1 << pow);
      bool pushed = q->push(ShenandoahMarkTask(array, 1, pow));
      assert(pushed, "overflow queue should always succeed pushing");
    }

    // Split out tasks, as suggested in ObjArrayChunkedTask docs. Record the last
    // successful right boundary to figure out the irregular tail.
    while ((1 << pow) > (int)ObjArrayMarkingStride &&
           (chunk*2 < ShenandoahMarkTask::chunk_size())) {
      pow--;
      int left_chunk = chunk*2 - 1;
      int right_chunk = chunk*2;
      int left_chunk_end = left_chunk * (1 << pow);
      if (left_chunk_end < len) {
        // Left half lies entirely within the array: push it as a full chunk
        // and keep subdividing the right half.
        bool pushed = q->push(ShenandoahMarkTask(array, left_chunk, pow));
        assert(pushed, "overflow queue should always succeed pushing");
        chunk = right_chunk;
        last_idx = left_chunk_end;
      } else {
        // Left half already extends past the array end: descend into it
        // instead of pushing anything.
        chunk = left_chunk;
      }
    }

    // Process the irregular tail, if present
    int from = last_idx;
    if (from < len) {
      array->oop_iterate_range(cl, from, len);
    }
  }
}
 166 
// Process one array chunk: the task (chunk, pow) covers indices
// [(chunk-1) * 2^pow, chunk * 2^pow). While the chunk is larger than
// ObjArrayMarkingStride and the split chunk id still fits the task encoding,
// split it in half: push the left half back on the queue (available for
// stealing) and keep the right half, then scan the remaining range.
template <class T>
inline void ShenandoahConcurrentMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop obj, int chunk, int pow) {
  assert(obj->is_objArray(), "expect object array");
  objArrayOop array = objArrayOop(obj);

  assert (ObjArrayMarkingStride > 0, "sanity");

  // Split out tasks, as suggested in ObjArrayChunkedTask docs. Avoid pushing tasks that
  // are known to start beyond the array.
  while ((1 << pow) > (int)ObjArrayMarkingStride && (chunk*2 < ShenandoahMarkTask::chunk_size())) {
    pow--;
    chunk *= 2;
    // Children of chunk C at power P are 2C-1 (left) and 2C (right) at P-1:
    // push the left sibling, continue with the right one ourselves.
    bool pushed = q->push(ShenandoahMarkTask(array, chunk - 1, pow));
    assert(pushed, "overflow queue should always succeed pushing");
  }

  int chunk_size = 1 << pow;

  int from = (chunk - 1) * chunk_size;
  int to = chunk * chunk_size;

#ifdef ASSERT
  int len = array->length();
  assert (0 <= from && from < len, "from is sane: %d/%d", from, len);
  assert (0 < to && to <= len, "to is sane: %d/%d", to, len);
#endif

  array->oop_iterate_range(cl, from, to);
}
 196 
 197 inline bool ShenandoahConcurrentMark::try_queue(ShenandoahObjToScanQueue* q, ShenandoahMarkTask &task) {
 198   return (q->pop_buffer(task) ||
 199           q->pop_local(task) ||
 200           q->pop_overflow(task));
 201 }
 202 
 203 class ShenandoahSATBBufferClosure : public SATBBufferClosure {
 204 private:
 205   ShenandoahObjToScanQueue* _queue;
 206   ShenandoahHeap* _heap;
 207 public:
 208   ShenandoahSATBBufferClosure(ShenandoahObjToScanQueue* q) :
 209     _queue(q), _heap(ShenandoahHeap::heap())
 210   {
 211   }
 212 
 213   void do_buffer(void** buffer, size_t size) {
 214     for (size_t i = 0; i < size; ++i) {
 215       oop* p = (oop*) &buffer[i];
 216       ShenandoahConcurrentMark::mark_through_ref<oop, RESOLVE>(p, _heap, _queue);
 217     }
 218   }
 219 };
 220 
 221 inline bool ShenandoahConcurrentMark::try_draining_satb_buffer(ShenandoahObjToScanQueue *q, ShenandoahMarkTask &task) {
 222   ShenandoahSATBBufferClosure cl(q);
 223   SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
 224   bool had_refs = satb_mq_set.apply_closure_to_completed_buffer(&cl);
 225   return had_refs && try_queue(q, task);
 226 }
 227 
// Convenience overload without string deduplication: forwards to the full
// version with STRING_DEDUP == false and no dedup queue.
template<class T, UpdateRefsMode UPDATE_REFS>
inline void ShenandoahConcurrentMark::mark_through_ref(T *p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q) {
  ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, false /* string dedup */>(p, heap, q, NULL);
}
 232 
// Marking step for a single reference location p: load the oop, optionally
// resolve/update it through the forwarding pointer (per UPDATE_REFS), then
// mark the object in the next bitmap and push it on the scan queue. When
// STRING_DEDUP is set, newly marked dedup candidates are also enqueued on dq.
template<class T, UpdateRefsMode UPDATE_REFS, bool STRING_DEDUP>
inline void ShenandoahConcurrentMark::mark_through_ref(T *p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    // UPDATE_REFS is a template constant: dead branches compile away.
    switch (UPDATE_REFS) {
    case NONE:
      break;
    case RESOLVE:
      // Follow the forwarding pointer without writing back to *p.
      obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      break;
    case SIMPLE:
      // We piggy-back reference updating to the marking tasks.
      obj = heap->update_with_forwarded_not_null(p, obj);
      break;
    case CONCURRENT:
      // Racing with mutators: the update may fail and yield NULL (see below).
      obj = heap->maybe_update_with_forwarded_not_null(p, obj);
      break;
    default:
      ShouldNotReachHere();
    }

    // Note: Only when concurrently updating references can obj become NULL here.
    // It happens when a mutator thread beats us by writing another value. In that
    // case we don't need to do anything else.
    if (UPDATE_REFS != CONCURRENT || !CompressedOops::is_null(obj)) {
      shenandoah_assert_not_forwarded(p, obj);
      shenandoah_assert_not_in_cset_except(p, obj, heap->cancelled_concgc());

      // Push for scanning only when this call marked the object
      // (presumably mark_next() reports whether the mark bit was newly set,
      // so each object is pushed and dedup-enqueued once — confirm against
      // ShenandoahHeap::mark_next).
      if (heap->mark_next(obj)) {
        bool pushed = q->push(ShenandoahMarkTask(obj));
        assert(pushed, "overflow queue should always succeed pushing");

        if (STRING_DEDUP && ShenandoahStringDedup::is_candidate(obj)) {
          assert(ShenandoahStringDedup::is_enabled(), "Must be enabled");
          assert(dq != NULL, "Dedup queue not set");
          ShenandoahStringDedup::enqueue_candidate(obj, dq);
        }
      }

      // Either we marked it, or someone else already had: it must be marked now.
      shenandoah_assert_marked_next(p, obj);
    }
  }
}
 277 
 278 #endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP