/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorStats.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zReferenceProcessor.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"

static const ZStatSubPhase ZSubPhaseConcurrentReferencesProcess("Concurrent References Process");
static const ZStatSubPhase ZSubPhaseConcurrentReferencesEnqueue("Concurrent References Enqueue");

ZReferenceProcessor::ZReferenceProcessor(ZWorkers* workers) :
    _workers(workers),
    _soft_reference_policy(NULL),
    _encountered_count(),
    _discovered_count(),
    _enqueued_count(),
    _discovered_list(NULL),
    _pending_list(NULL),
    _pending_list_tail(_pending_list.addr()) {}

void ZReferenceProcessor::set_soft_reference_policy(bool clear) {
  static AlwaysClearPolicy always_clear_policy;
  static LRUMaxHeapPolicy lru_max_heap_policy;

  if (clear) {
    log_info(gc, ref)("Clearing All Soft References");
    _soft_reference_policy = &always_clear_policy;
  } else {
    _soft_reference_policy = &lru_max_heap_policy;
  }

  _soft_reference_policy->setup();
}

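// Update the SoftReference clock (java.lang.ref.SoftReference.clock) to the
// current time in milliseconds. The LRU policy compares the timestamp stored
// in each soft reference against this clock when deciding whether to clear it.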
void ZReferenceProcessor::update_soft_reference_clock() const {
  const jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  java_lang_ref_SoftReference::set_clock(now);
}

bool ZReferenceProcessor::is_reference_inactive(oop obj) const {
  // A non-null next field means the reference is inactive
  return java_lang_ref_Reference::next(obj) != NULL;
}

ReferenceType ZReferenceProcessor::reference_type(oop obj) const {
  return InstanceKlass::cast(obj->klass())->reference_type();
}

const char* ZReferenceProcessor::reference_type_name(ReferenceType type) const {
  switch (type) {
  case REF_SOFT:
    return "Soft";

  case REF_WEAK:
    return "Weak";

  case REF_FINAL:
    return "Final";

  case REF_PHANTOM:
    return "Phantom";

  default:
    ShouldNotReachHere();
    return NULL;
  }
}

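// Raw, unbarriered access to the referent field. Callers are responsible for
// applying the appropriate load barrier before using the returned oop.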
volatile oop* ZReferenceProcessor::reference_referent_addr(oop obj) const {
  return (volatile oop*)java_lang_ref_Reference::referent_addr_raw(obj);
}

oop ZReferenceProcessor::reference_referent(oop obj) const {
  return *reference_referent_addr(obj);
}

bool ZReferenceProcessor::is_referent_alive_or_null(oop obj, ReferenceType type) const {
  volatile oop* const p = reference_referent_addr(obj);

  // Check if the referent is alive or null, in which case we don't want to discover
  // the reference. It can only be null if the application called Reference.enqueue()
  // or Reference.clear().
  if (type == REF_PHANTOM) {
    const oop o = ZBarrier::weak_load_barrier_on_phantom_oop_field(p);
    return o == NULL || ZHeap::heap()->is_object_live(ZOop::to_address(o));
  } else {
    const oop o = ZBarrier::weak_load_barrier_on_weak_oop_field(p);
    return o == NULL || ZHeap::heap()->is_object_strongly_live(ZOop::to_address(o));
  }
}

bool ZReferenceProcessor::is_referent_softly_alive(oop obj, ReferenceType type) const {
  if (type != REF_SOFT) {
    // Not a soft reference
    return false;
  }

  // Ask soft reference policy
  const jlong clock = java_lang_ref_SoftReference::clock();
  assert(clock != 0, "Clock not initialized");
  assert(_soft_reference_policy != NULL, "Policy not initialized");
  return !_soft_reference_policy->should_clear_reference(obj, clock);
}

bool ZReferenceProcessor::should_drop_reference(oop obj, ReferenceType type) const {
  // This check is racing with a call to Reference.clear() from the application.
  // If the application clears the reference after this check it will still end
  // up on the pending list, and there's nothing we can do about that without
  // changing the Reference.clear() API. This check is also racing with a call
  // to Reference.enqueue() from the application, which is unproblematic, since
  // the application wants the reference to be enqueued anyway.
  const oop o = reference_referent(obj);
  if (o == NULL) {
    // The reference has been cleared by a call to Reference.enqueue()
    // or Reference.clear() from the application, which means we
    // should drop the reference.
    return true;
  }

  // Check if the referent is still alive, in which case we should
  // drop the reference.
  if (type == REF_PHANTOM) {
    return ZBarrier::is_alive_barrier_on_phantom_oop(o);
  } else {
    return ZBarrier::is_alive_barrier_on_weak_oop(o);
  }
}

bool ZReferenceProcessor::should_mark_referent(ReferenceType type) const {
  // Referents of final references (and their reachable subgraphs) are
  // always marked finalizable during discovery. This avoids the problem
  // of later having to mark those objects if the referent is still final
  // reachable during processing.
  return type == REF_FINAL;
}

bool ZReferenceProcessor::should_clear_referent(ReferenceType type) const {
  // Referents that were not marked must be cleared
  return !should_mark_referent(type);
}

void ZReferenceProcessor::keep_referent_alive(oop obj, ReferenceType type) const {
  volatile oop* const p = reference_referent_addr(obj);
  if (type == REF_PHANTOM) {
    ZBarrier::keep_alive_barrier_on_phantom_oop_field(p);
  } else {
    ZBarrier::keep_alive_barrier_on_weak_oop_field(p);
  }
}

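// Discovery entry point, called for each Reference object encountered during
// marking. Returns true if the reference was added to this worker's discovered
// list, and false if it should not take part in reference processing
// (processing disabled, reference already inactive, or referent still alive).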
bool ZReferenceProcessor::discover_reference(oop obj, ReferenceType type) {
  if (!RegisterReferences) {
    // Reference processing disabled
    return false;
  }

  log_trace(gc, ref)("Encountered Reference: " PTR_FORMAT " (%s)", p2i(obj), reference_type_name(type));

  // Update statistics
  _encountered_count.get()[type]++;

  if (is_reference_inactive(obj) ||
      is_referent_alive_or_null(obj, type) ||
      is_referent_softly_alive(obj, type)) {
    // Not discovered
    return false;
  }

  discover(obj, type);

  // Discovered
  return true;
}

void ZReferenceProcessor::discover(oop obj, ReferenceType type) {
  log_trace(gc, ref)("Discovered Reference: " PTR_FORMAT " (%s)", p2i(obj), reference_type_name(type));

  // Update statistics
  _discovered_count.get()[type]++;

  // Mark referent finalizable
  if (should_mark_referent(type)) {
    oop* const referent_addr = (oop*)java_lang_ref_Reference::referent_addr_raw(obj);
    ZBarrier::mark_barrier_on_oop_field(referent_addr, true /* finalizable */);
  }

  // Add reference to discovered list
  assert(java_lang_ref_Reference::discovered(obj) == NULL, "Already discovered");
  oop* const list = _discovered_list.addr();
  java_lang_ref_Reference::set_discovered(obj, *list);
  *list = obj;
}

oop ZReferenceProcessor::drop(oop obj, ReferenceType type) {
  log_trace(gc, ref)("Dropped Reference: " PTR_FORMAT " (%s)", p2i(obj), reference_type_name(type));

  // Keep referent alive
  keep_referent_alive(obj, type);

  // Unlink and return next in list
  const oop next = java_lang_ref_Reference::discovered(obj);
  java_lang_ref_Reference::set_discovered(obj, NULL);
  return next;
}

oop* ZReferenceProcessor::keep(oop obj, ReferenceType type) {
  log_trace(gc, ref)("Enqueued Reference: " PTR_FORMAT " (%s)", p2i(obj), reference_type_name(type));

  // Update statistics
  _enqueued_count.get()[type]++;

  // Clear referent
  if (should_clear_referent(type)) {
    java_lang_ref_Reference::set_referent(obj, NULL);
  }

  // Make reference inactive by self-looping the next field. We could be racing with a
  // call to Reference.enqueue() from the application, which is why we are using a CAS
  // to make sure we change the next field only if it is NULL. A failing CAS means the
  // reference has already been enqueued. However, we don't check the result of the CAS,
  // since we still have no option other than keeping the reference on the pending list.
  // It's ok to have the reference both on the pending list and enqueued at the same
  // time (the pending list is linked through the discovered field, while the reference
  // queue is linked through the next field). When the ReferenceHandler thread later
  // calls Reference.enqueue() we detect that it has already been enqueued and drop it.
  oop* const next_addr = (oop*)java_lang_ref_Reference::next_addr_raw(obj);
  Atomic::cmpxchg(obj, next_addr, oop(NULL));

  // Return next in list
  return (oop*)java_lang_ref_Reference::discovered_addr_raw(obj);
}

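// Per-worker processing of the discovered list. References whose referents are
// still alive are dropped (unlinked), the rest are kept, and the resulting list
// is prepended to the shared internal pending list.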
void ZReferenceProcessor::work() {
  // Process discovered references
  oop* const list = _discovered_list.addr();
  oop* p = list;

  while (*p != NULL) {
    const oop obj = *p;
    const ReferenceType type = reference_type(obj);

    if (should_drop_reference(obj, type)) {
      *p = drop(obj, type);
    } else {
      p = keep(obj, type);
    }
  }

  // Prepend discovered references to internal pending list
  if (*list != NULL) {
    *p = Atomic::xchg(*list, _pending_list.addr());
    if (*p == NULL) {
      // First to prepend to list, record tail
      _pending_list_tail = p;
    }

    // Clear discovered list
    *list = NULL;
  }
}

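// Returns true if all per-worker discovered lists and the internal pending
// list are empty.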
bool ZReferenceProcessor::is_empty() const {
  ZPerWorkerConstIterator<oop> iter(&_discovered_list);
  for (const oop* list; iter.next(&list);) {
    if (*list != NULL) {
      return false;
    }
  }

  if (_pending_list.get() != NULL) {
    return false;
  }

  return true;
}

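// Reset the per-worker encountered/discovered/enqueued counters for all
// reference types.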
void ZReferenceProcessor::reset_statistics() {
  assert(is_empty(), "Should be empty");

  // Reset encountered
  ZPerWorkerIterator<Counters> iter_encountered(&_encountered_count);
  for (Counters* counters; iter_encountered.next(&counters);) {
    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
      (*counters)[i] = 0;
    }
  }

  // Reset discovered
  ZPerWorkerIterator<Counters> iter_discovered(&_discovered_count);
  for (Counters* counters; iter_discovered.next(&counters);) {
    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
      (*counters)[i] = 0;
    }
  }

  // Reset enqueued
  ZPerWorkerIterator<Counters> iter_enqueued(&_enqueued_count);
  for (Counters* counters; iter_enqueued.next(&counters);) {
    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
      (*counters)[i] = 0;
    }
  }
}

void ZReferenceProcessor::collect_statistics() {
  Counters encountered = {};
  Counters discovered = {};
  Counters enqueued = {};

  // Sum encountered
  ZPerWorkerConstIterator<Counters> iter_encountered(&_encountered_count);
  for (const Counters* counters; iter_encountered.next(&counters);) {
    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
      encountered[i] += (*counters)[i];
    }
  }

  // Sum discovered
  ZPerWorkerConstIterator<Counters> iter_discovered(&_discovered_count);
  for (const Counters* counters; iter_discovered.next(&counters);) {
    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
      discovered[i] += (*counters)[i];
    }
  }

  // Sum enqueued
  ZPerWorkerConstIterator<Counters> iter_enqueued(&_enqueued_count);
  for (const Counters* counters; iter_enqueued.next(&counters);) {
    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
      enqueued[i] += (*counters)[i];
    }
  }

  // Update statistics
  ZStatReferences::set_soft(encountered[REF_SOFT], discovered[REF_SOFT], enqueued[REF_SOFT]);
  ZStatReferences::set_weak(encountered[REF_WEAK], discovered[REF_WEAK], enqueued[REF_WEAK]);
  ZStatReferences::set_final(encountered[REF_FINAL], discovered[REF_FINAL], enqueued[REF_FINAL]);
  ZStatReferences::set_phantom(encountered[REF_PHANTOM], discovered[REF_PHANTOM], enqueued[REF_PHANTOM]);

  // Trace statistics
  const ReferenceProcessorStats stats(discovered[REF_SOFT],
                                      discovered[REF_WEAK],
                                      discovered[REF_FINAL],
                                      discovered[REF_PHANTOM]);
  ZTracer::tracer()->report_gc_reference_stats(stats);
}

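// Task that runs ZReferenceProcessor::work() in each concurrent GC worker thread.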
class ZReferenceProcessorTask : public ZTask {
private:
  ZReferenceProcessor* const _reference_processor;

public:
  ZReferenceProcessorTask(ZReferenceProcessor* reference_processor) :
      ZTask("ZReferenceProcessorTask"),
      _reference_processor(reference_processor) {}

  virtual void work() {
    _reference_processor->work();
  }
};

void ZReferenceProcessor::process_references() {
  ZStatTimer timer(ZSubPhaseConcurrentReferencesProcess);

  // Process discovered lists
  ZReferenceProcessorTask task(this);
  _workers->run_concurrent(&task);

  // Update soft reference clock
  update_soft_reference_clock();

  // Collect, log and trace statistics
  collect_statistics();
}

void ZReferenceProcessor::enqueue_references() {
  ZStatTimer timer(ZSubPhaseConcurrentReferencesEnqueue);

  if (_pending_list.get() == NULL) {
    // Nothing to enqueue
    return;
  }

  {
    // Heap_lock protects external pending list
    MonitorLockerEx ml(Heap_lock);

    // Prepend internal pending list to external pending list
    *_pending_list_tail = Universe::swap_reference_pending_list(_pending_list.get());

    // Notify ReferenceHandler thread
    ml.notify_all();
  }

  // Reset internal pending list
  _pending_list.set(NULL);
  _pending_list_tail = _pending_list.addr();
}