1 /*
   2  * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "classfile/javaClasses.inline.hpp"
  26 #include "gc/shared/referencePolicy.hpp"
  27 #include "gc/shared/referenceProcessorStats.hpp"
  28 #include "gc/z/zHeap.inline.hpp"
  29 #include "gc/z/zOopClosures.inline.hpp"
  30 #include "gc/z/zReferenceProcessor.hpp"
  31 #include "gc/z/zStat.hpp"
  32 #include "gc/z/zTask.hpp"
  33 #include "gc/z/zTracer.inline.hpp"
  34 #include "gc/z/zUtils.inline.hpp"
  35 #include "memory/universe.hpp"
  36 #include "runtime/mutexLocker.hpp"
  37 #include "runtime/os.hpp"
  38 
  39 static const ZStatSubPhase ZSubPhaseConcurrentReferencesProcess("Concurrent References Process");
  40 static const ZStatSubPhase ZSubPhaseConcurrentReferencesEnqueue("Concurrent References Enqueue");
  41 
// Constructs the reference processor with all per-worker statistics counters
// zeroed and both the discovered and pending lists empty. The pending list
// tail initially points at the pending list head slot itself, so appending
// works uniformly whether or not the list already has elements.
ZReferenceProcessor::ZReferenceProcessor(ZWorkers* workers) :
    _workers(workers),
    _soft_reference_policy(NULL), // Selected later via set_soft_reference_policy()
    _encountered_count(),
    _discovered_count(),
    _enqueued_count(),
    _discovered_list(NULL),
    _pending_list(NULL),
    _pending_list_tail(_pending_list.addr()) {}
  51 
  52 void ZReferenceProcessor::set_soft_reference_policy(bool clear) {
  53   static AlwaysClearPolicy always_clear_policy;
  54   static LRUMaxHeapPolicy lru_max_heap_policy;
  55 
  56   if (clear) {
  57     log_info(gc, ref)("Clearing All Soft References");
  58     _soft_reference_policy = &always_clear_policy;
  59   } else {
  60     _soft_reference_policy = &lru_max_heap_policy;
  61   }
  62 
  63   _soft_reference_policy->setup();
  64 }
  65 
  66 void ZReferenceProcessor::update_soft_reference_clock() const {
  67   const jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  68   java_lang_ref_SoftReference::set_clock(now);
  69 }
  70 
  71 bool ZReferenceProcessor::is_reference_inactive(oop obj) const {
  72   // A non-null next field means the reference is inactive
  73   return java_lang_ref_Reference::next(obj) != NULL;
  74 }
  75 
// Returns the reference type (soft/weak/final/phantom) of the given
// Reference object, as recorded in its InstanceKlass.
ReferenceType ZReferenceProcessor::reference_type(oop obj) const {
  return InstanceKlass::cast(obj->klass())->reference_type();
}
  79 
// Returns the address of the referent field of the given Reference object.
// The field is treated as volatile since it is concurrently updated by both
// the application (Reference.clear()) and the GC.
volatile oop* ZReferenceProcessor::reference_referent_addr(oop obj) const {
  return (volatile oop*)java_lang_ref_Reference::referent_addr(obj);
}
  83 
// Raw (barrier-less) load of the referent field of the given Reference.
oop ZReferenceProcessor::reference_referent(oop obj) const {
  return *reference_referent_addr(obj);
}
  87 
  88 bool ZReferenceProcessor::is_referent_alive_or_null(oop obj, ReferenceType type) const {
  89   volatile oop* const p = reference_referent_addr(obj);
  90 
  91   // Check if the referent is alive or null, in which case we don't want to discover
  92   // the reference. It can only be null if the application called Reference.clear().
  93   if (type == REF_PHANTOM) {
  94     const oop o = ZBarrier::weak_load_barrier_on_phantom_oop_field(p);
  95     return o == NULL || ZHeap::heap()->is_object_live(ZOop::to_address(o));
  96   } else {
  97     const oop o = ZBarrier::weak_load_barrier_on_weak_oop_field(p);
  98     return o == NULL || ZHeap::heap()->is_object_strongly_live(ZOop::to_address(o));
  99   }
 100 }
 101 
 102 bool ZReferenceProcessor::is_referent_softly_alive(oop obj, ReferenceType type) const {
 103   if (type != REF_SOFT) {
 104     // Not a soft reference
 105     return false;
 106   }
 107 
 108   // Ask soft reference policy
 109   const jlong clock = java_lang_ref_SoftReference::clock();
 110   assert(clock != 0, "Clock not initialized");
 111   assert(_soft_reference_policy != NULL, "Policy not initialized");
 112   return !_soft_reference_policy->should_clear_reference(obj, clock);
 113 }
 114 
 115 bool ZReferenceProcessor::should_drop_reference(oop obj, ReferenceType type) const {
 116   // This check is racing with a call to Reference.clear() from the application.
 117   // If the application clears the reference after this check it will still end
 118   // up on the pending list, and there's nothing we can do about that without
 119   // changing the Reference.clear() API.
 120   const oop o = reference_referent(obj);
 121   if (o == NULL) {
 122     // Reference has already been cleared, by an application call to
 123     // Reference.clear(), which means we should drop the reference.
 124     return true;
 125   }
 126 
 127   // Check if the referent is still alive, in which case we should
 128   // drop the reference.
 129   if (type == REF_PHANTOM) {
 130     return ZBarrier::is_alive_barrier_on_phantom_oop(o);
 131   } else {
 132     return ZBarrier::is_alive_barrier_on_weak_oop(o);
 133   }
 134 }
 135 
// Returns true if discovery should mark the referent (only final references).
bool ZReferenceProcessor::should_mark_referent(ReferenceType type) const {
  // Referents of final references (and its reachable sub graph) are
  // always marked finalizable during discovery. This avoids the problem
  // of later having to mark those objects if the referent is still final
  // reachable during processing.
  return type == REF_FINAL;
}
 143 
// Returns true if the referent field should be cleared when the reference
// is kept for enqueuing.
bool ZReferenceProcessor::should_clear_referent(ReferenceType type) const {
  // Referents that were not marked must be cleared
  return !should_mark_referent(type);
}
 148 
 149 void ZReferenceProcessor::keep_referent_alive(oop obj, ReferenceType type) const {
 150   volatile oop* const p = reference_referent_addr(obj);
 151   if (type == REF_PHANTOM) {
 152     ZBarrier::keep_alive_barrier_on_phantom_oop_field(p);
 153   } else {
 154     ZBarrier::keep_alive_barrier_on_weak_oop_field(p);
 155   }
 156 }
 157 
 158 bool ZReferenceProcessor::discover_reference(oop obj, ReferenceType type) {
 159   if (!RegisterReferences) {
 160     // Reference processing disabled
 161     return false;
 162   }
 163 
 164   // Update statistics
 165   _encountered_count.get()[type]++;
 166 
 167   if (is_reference_inactive(obj) ||
 168       is_referent_alive_or_null(obj, type) ||
 169       is_referent_softly_alive(obj, type)) {
 170     // Not discovered
 171     return false;
 172   }
 173 
 174   discover(obj, type);
 175 
 176   // Discovered
 177   return true;
 178 }
 179 
// Adds the given reference to this worker's discovered list, first marking
// the referent finalizable when required (final references).
void ZReferenceProcessor::discover(oop obj, ReferenceType type) {
  log_trace(gc, ref)("Discovered Reference: " PTR_FORMAT " (%s)", p2i(obj), ReferenceTypeName[type]);

  // Update statistics
  _discovered_count.get()[type]++;

  // Mark referent finalizable
  if (should_mark_referent(type)) {
    oop* const referent_addr = (oop*)java_lang_ref_Reference::referent_addr(obj);
    ZBarrier::mark_barrier_on_oop_field(referent_addr, true /* finalizable */);
  }

  // Prepend reference to the discovered list, linked through the discovered field
  assert(java_lang_ref_Reference::discovered(obj) == NULL, "Already discovered");
  oop* const list = _discovered_list.addr();
  java_lang_ref_Reference::set_discovered(obj, *list);
  *list = obj;
}
 198 
 199 oop ZReferenceProcessor::drop(oop obj, ReferenceType type) {
 200   log_trace(gc, ref)("Dropped Reference: " PTR_FORMAT " (%s)", p2i(obj), ReferenceTypeName[type]);
 201 
 202   // Keep referent alive
 203   keep_referent_alive(obj, type);
 204 
 205   // Unlink and return next in list
 206   const oop next = java_lang_ref_Reference::discovered(obj);
 207   java_lang_ref_Reference::set_discovered(obj, NULL);
 208   return next;
 209 }
 210 
// Keeps the given reference for enqueuing: updates statistics, clears the
// referent when required, and makes the reference inactive. Returns the
// address of the reference's discovered field, i.e. the next list slot.
oop* ZReferenceProcessor::keep(oop obj, ReferenceType type) {
  log_trace(gc, ref)("Pending Reference: " PTR_FORMAT " (%s)", p2i(obj), ReferenceTypeName[type]);

  // Update statistics
  _enqueued_count.get()[type]++;

  // Clear referent (unless it was marked, i.e. for final references)
  if (should_clear_referent(type)) {
    java_lang_ref_Reference::set_referent(obj, NULL);
  }

  // Make reference inactive by self-looping the next field. We could be racing with a
  // call to Reference.enqueue() from the application, which is why we are using a CAS
  // to make sure we change the next field only if it is NULL. A failing CAS means the
  // reference has already been enqueued. However, we don't check the result of the CAS,
  // since we still have no option other than keeping the reference on the pending list.
  // It's ok to have the reference both on the pending list and enqueued at the same
  // time (the pending list is linked through the discovered field, while the reference
  // queue is linked through the next field). When the ReferenceHandler thread later
  // calls Reference.enqueue() we detect that it has already been enqueued and drop it.
  oop* const next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
  Atomic::cmpxchg(obj, next_addr, oop(NULL));

  // Return next in list
  return (oop*)java_lang_ref_Reference::discovered_addr(obj);
}
 237 
// Per-worker processing of this worker's discovered list. Each reference is
// either dropped (unlinked, referent kept alive) or kept (referent cleared
// when required, reference made inactive). Whatever remains afterwards is
// prepended onto the shared internal pending list.
void ZReferenceProcessor::work() {
  // Process discovered references. p always points at the slot holding the
  // current reference: either the list head or the previous reference's
  // discovered field.
  oop* const list = _discovered_list.addr();
  oop* p = list;

  while (*p != NULL) {
    const oop obj = *p;
    const ReferenceType type = reference_type(obj);

    if (should_drop_reference(obj, type)) {
      *p = drop(obj, type);
    } else {
      p = keep(obj, type);
    }
  }

  // Prepend discovered references to internal pending list. The xchg
  // atomically publishes our list head and returns the previous pending
  // list head, which is linked onto our tail slot (*p).
  if (*list != NULL) {
    *p = Atomic::xchg(*list, _pending_list.addr());
    if (*p == NULL) {
      // First to prepend to list, record tail
      _pending_list_tail = p;
    }

    // Clear discovered list
    *list = NULL;
  }
}
 266 
 267 bool ZReferenceProcessor::is_empty() const {
 268   ZPerWorkerConstIterator<oop> iter(&_discovered_list);
 269   for (const oop* list; iter.next(&list);) {
 270     if (*list != NULL) {
 271       return false;
 272     }
 273   }
 274 
 275   if (_pending_list.get() != NULL) {
 276     return false;
 277   }
 278 
 279   return true;
 280 }
 281 
 282 void ZReferenceProcessor::reset_statistics() {
 283   assert(is_empty(), "Should be empty");
 284 
 285   // Reset encountered
 286   ZPerWorkerIterator<Counters> iter_encountered(&_encountered_count);
 287   for (Counters* counters; iter_encountered.next(&counters);) {
 288     for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
 289       (*counters)[i] = 0;
 290     }
 291   }
 292 
 293   // Reset discovered
 294   ZPerWorkerIterator<Counters> iter_discovered(&_discovered_count);
 295   for (Counters* counters; iter_discovered.next(&counters);) {
 296     for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
 297       (*counters)[i] = 0;
 298     }
 299   }
 300 
 301   // Reset enqueued
 302   ZPerWorkerIterator<Counters> iter_enqueued(&_enqueued_count);
 303   for (Counters* counters; iter_enqueued.next(&counters);) {
 304     for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
 305       (*counters)[i] = 0;
 306     }
 307   }
 308 }
 309 
 310 void ZReferenceProcessor::collect_statistics() {
 311   Counters encountered = {};
 312   Counters discovered = {};
 313   Counters enqueued = {};
 314 
 315   // Sum encountered
 316   ZPerWorkerConstIterator<Counters> iter_encountered(&_encountered_count);
 317   for (const Counters* counters; iter_encountered.next(&counters);) {
 318     for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
 319       encountered[i] += (*counters)[i];
 320     }
 321   }
 322 
 323   // Sum discovered
 324   ZPerWorkerConstIterator<Counters> iter_discovered(&_discovered_count);
 325   for (const Counters* counters; iter_discovered.next(&counters);) {
 326     for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
 327       discovered[i] += (*counters)[i];
 328     }
 329   }
 330 
 331   // Sum enqueued
 332   ZPerWorkerConstIterator<Counters> iter_enqueued(&_enqueued_count);
 333   for (const Counters* counters; iter_enqueued.next(&counters);) {
 334     for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
 335       enqueued[i] += (*counters)[i];
 336     }
 337   }
 338 
 339   // Update statistics
 340   ZStatReferences::set_soft(encountered[REF_SOFT], discovered[REF_SOFT], enqueued[REF_SOFT]);
 341   ZStatReferences::set_weak(encountered[REF_WEAK], discovered[REF_WEAK], enqueued[REF_WEAK]);
 342   ZStatReferences::set_final(encountered[REF_FINAL], discovered[REF_FINAL], enqueued[REF_FINAL]);
 343   ZStatReferences::set_phantom(encountered[REF_PHANTOM], discovered[REF_PHANTOM], enqueued[REF_PHANTOM]);
 344 
 345   // Trace statistics
 346   const ReferenceProcessorStats stats(discovered[REF_SOFT],
 347                                       discovered[REF_WEAK],
 348                                       discovered[REF_FINAL],
 349                                       discovered[REF_PHANTOM]);
 350   ZTracer::tracer()->report_gc_reference_stats(stats);
 351 }
 352 
// ZTask that calls back into ZReferenceProcessor::work(). Each invocation
// of work() processes that worker's own discovered list (the lists are
// per-worker), so the task can run on multiple workers concurrently.
class ZReferenceProcessorTask : public ZTask {
private:
  ZReferenceProcessor* const _reference_processor;

public:
  ZReferenceProcessorTask(ZReferenceProcessor* reference_processor) :
      ZTask("ZReferenceProcessorTask"),
      _reference_processor(reference_processor) {}

  virtual void work() {
    _reference_processor->work();
  }
};
 366 
// Processes all discovered references: runs the processing task on the GC
// workers, advances the soft reference clock, and collects statistics.
void ZReferenceProcessor::process_references() {
  ZStatTimer timer(ZSubPhaseConcurrentReferencesProcess);

  // Process discovered lists
  ZReferenceProcessorTask task(this);
  _workers->run_concurrent(&task);

  // Update soft reference clock
  update_soft_reference_clock();

  // Collect, log and trace statistics
  collect_statistics();
}
 380 
// Hands the internal pending list over to the Java-visible pending list
// and notifies waiters (the ReferenceHandler thread waits on Heap_lock).
// No-op if no references were kept this cycle.
void ZReferenceProcessor::enqueue_references() {
  ZStatTimer timer(ZSubPhaseConcurrentReferencesEnqueue);

  if (_pending_list.get() == NULL) {
    // Nothing to enqueue
    return;
  }

  {
    // Heap_lock protects external pending list
    MonitorLockerEx ml(Heap_lock);

    // Prepend internal pending list to external pending list by swapping in
    // our head and linking the old external head onto our tail
    *_pending_list_tail = Universe::swap_reference_pending_list(_pending_list.get());

    // Notify ReferenceHandler thread
    ml.notify_all();
  }

  // Reset internal pending list (tail points back at the empty head slot)
  _pending_list.set(NULL);
  _pending_list_tail = _pending_list.addr();
}
 404 
// Runs the full reference processing pipeline: process the discovered
// references, then enqueue the surviving ones on the Java pending list.
void ZReferenceProcessor::process_and_enqueue_references() {
  process_references();
  enqueue_references();
}