/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorStats.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zReferenceProcessor.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "gc/z/zValue.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"

static const ZStatSubPhase ZSubPhaseConcurrentReferencesProcess("Concurrent References Process");
static const ZStatSubPhase ZSubPhaseConcurrentReferencesEnqueue("Concurrent References Enqueue");

static ReferenceType reference_type(oop reference) {
  return InstanceKlass::cast(reference->klass())->reference_type();
}

static const char* reference_type_name(ReferenceType type) {
  switch (type) {
  case REF_SOFT:
    return "Soft";

  case REF_WEAK:
    return "Weak";

  case REF_FINAL:
    return "Final";

  case REF_PHANTOM:
    return "Phantom";

  default:
    ShouldNotReachHere();
    return NULL;
  }
}

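// Raw accessors for the referent, discovered and next fields of
// java.lang.ref.Reference. The *_raw variants read and write the fields
// without applying any access barriers; reference processing applies the
// barriers it needs explicitly (weak load, keep-alive and mark barriers).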
static volatile oop* reference_referent_addr(oop reference) {
  return (volatile oop*)java_lang_ref_Reference::referent_addr_raw(reference);
}

static oop reference_referent(oop reference) {
  return *reference_referent_addr(reference);
}

static void reference_set_referent(oop reference, oop referent) {
  java_lang_ref_Reference::set_referent_raw(reference, referent);
}

static oop* reference_discovered_addr(oop reference) {
  return (oop*)java_lang_ref_Reference::discovered_addr_raw(reference);
}

static oop reference_discovered(oop reference) {
  return *reference_discovered_addr(reference);
}

static void reference_set_discovered(oop reference, oop discovered) {
  java_lang_ref_Reference::set_discovered_raw(reference, discovered);
}

static oop* reference_next_addr(oop reference) {
  return (oop*)java_lang_ref_Reference::next_addr_raw(reference);
}

static oop reference_next(oop reference) {
  return *reference_next_addr(reference);
}

static void reference_set_next(oop reference, oop next) {
  java_lang_ref_Reference::set_next_raw(reference, next);
}

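// Update the java.lang.ref.SoftReference clock to the current time in
// milliseconds. The clock is advanced once per GC cycle, after reference
// processing (see process_references() below), and is what the SoftReference
// policy compares timestamps against when deciding whether to clear a
// SoftReference.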
static void soft_reference_update_clock() {
  const jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  java_lang_ref_SoftReference::set_clock(now);
}

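// The discovered list and the statistics counters are ZPerWorker values,
// giving each GC worker thread its own instance. Discovery and counting can
// therefore proceed without synchronization; the per-worker results are
// spliced together and summed once processing is done.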
ZReferenceProcessor::ZReferenceProcessor(ZWorkers* workers) :
    _workers(workers),
    _soft_reference_policy(NULL),
    _encountered_count(),
    _discovered_count(),
    _enqueued_count(),
    _discovered_list(NULL),
    _pending_list(NULL),
    _pending_list_tail(_pending_list.addr()) {}

void ZReferenceProcessor::set_soft_reference_policy(bool clear) {
  static AlwaysClearPolicy always_clear_policy;
  static LRUMaxHeapPolicy lru_max_heap_policy;

  if (clear) {
    log_info(gc, ref)("Clearing All SoftReferences");
    _soft_reference_policy = &always_clear_policy;
  } else {
    _soft_reference_policy = &lru_max_heap_policy;
  }

  _soft_reference_policy->setup();
}

bool ZReferenceProcessor::is_inactive(oop reference, oop referent, ReferenceType type) const {
  if (type == REF_FINAL) {
    // A FinalReference is inactive if its next field is non-null. An application can't
    // call enqueue() or clear() on a FinalReference.
    return reference_next(reference) != NULL;
  } else {
    // A non-FinalReference is inactive if the referent is null. The referent can only
    // be null if the application called Reference.enqueue() or Reference.clear().
    return referent == NULL;
  }
}

bool ZReferenceProcessor::is_strongly_live(oop referent) const {
  return ZHeap::heap()->is_object_strongly_live(ZOop::to_address(referent));
}

bool ZReferenceProcessor::is_softly_live(oop reference, ReferenceType type) const {
  if (type != REF_SOFT) {
    // Not a SoftReference
    return false;
  }

  // Ask SoftReference policy
  const jlong clock = java_lang_ref_SoftReference::clock();
  assert(clock != 0, "Clock not initialized");
  assert(_soft_reference_policy != NULL, "Policy not initialized");
  return !_soft_reference_policy->should_clear_reference(reference, clock);
}

bool ZReferenceProcessor::should_discover(oop reference, ReferenceType type) const {
  volatile oop* const referent_addr = reference_referent_addr(reference);
  const oop referent = ZBarrier::weak_load_barrier_on_oop_field(referent_addr);

  if (is_inactive(reference, referent, type)) {
    return false;
  }

  if (is_strongly_live(referent)) {
    return false;
  }

  if (is_softly_live(reference, type)) {
    return false;
  }

  // PhantomReferences with finalizable marked referents should technically not have
  // to be discovered. However, InstanceRefKlass::oop_oop_iterate_ref_processing()
  // does not know about the finalizable mark concept, and will therefore mark
  // referents in non-discovered PhantomReferences as strongly live. To prevent
  // this, we always discover PhantomReferences with finalizable marked referents.
  // They will automatically be dropped during the reference processing phase.
  return true;
}

bool ZReferenceProcessor::should_drop(oop reference, ReferenceType type) const {
  // This check is racing with a call to Reference.clear() from the application.
  // If the application clears the reference after this check it will still end
  // up on the pending list, and there's nothing we can do about that without
  // changing the Reference.clear() API. This check is also racing with a call
  // to Reference.enqueue() from the application, which is unproblematic, since
  // the application wants the reference to be enqueued anyway.
  const oop referent = reference_referent(reference);
  if (referent == NULL) {
    // Reference has been cleared by a call to Reference.enqueue()
    // or Reference.clear() from the application, which means we
    // should drop the reference.
    return true;
  }

  // Check if the referent is still alive, in which case we should
  // drop the reference.
  if (type == REF_PHANTOM) {
    return ZBarrier::is_alive_barrier_on_phantom_oop(referent);
  } else {
    return ZBarrier::is_alive_barrier_on_weak_oop(referent);
  }
}

void ZReferenceProcessor::keep_alive(oop reference, ReferenceType type) const {
  volatile oop* const p = reference_referent_addr(reference);
  if (type == REF_PHANTOM) {
    ZBarrier::keep_alive_barrier_on_phantom_oop_field(p);
  } else {
    ZBarrier::keep_alive_barrier_on_weak_oop_field(p);
  }
}

void ZReferenceProcessor::make_inactive(oop reference, ReferenceType type) const {
  if (type == REF_FINAL) {
    // Don't clear referent. It is needed by the Finalizer thread to make the call
    // to finalize(). A FinalReference is instead made inactive by self-looping the
    // next field. An application can't call FinalReference.enqueue(), so there is
    // no race to worry about when setting the next field.
    assert(reference_next(reference) == NULL, "Already inactive");
    reference_set_next(reference, reference);
  } else {
    // Clear referent
    reference_set_referent(reference, NULL);
  }
}

void ZReferenceProcessor::discover(oop reference, ReferenceType type) {
  log_trace(gc, ref)("Discovered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));

  // Update statistics
  _discovered_count.get()[type]++;

  if (type == REF_FINAL) {
    // Mark referent (and its reachable subgraph) finalizable. This avoids
    // the problem of later having to mark those objects if the referent is
    // still final reachable during processing.
    volatile oop* const referent_addr = reference_referent_addr(reference);
    ZBarrier::mark_barrier_on_oop_field(referent_addr, true /* finalizable */);
  }

  // Add reference to discovered list
  assert(reference_discovered(reference) == NULL, "Already discovered");
  oop* const list = _discovered_list.addr();
  reference_set_discovered(reference, *list);
  *list = reference;
}

bool ZReferenceProcessor::discover_reference(oop reference, ReferenceType type) {
  if (!RegisterReferences) {
    // Reference processing disabled
    return false;
  }

  log_trace(gc, ref)("Encountered Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));

  // Update statistics
  _encountered_count.get()[type]++;

  if (!should_discover(reference, type)) {
    // Not discovered
    return false;
  }

  discover(reference, type);

  // Discovered
  return true;
}

oop ZReferenceProcessor::drop(oop reference, ReferenceType type) {
  log_trace(gc, ref)("Dropped Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));

  // Keep referent alive
  keep_alive(reference, type);

  // Unlink and return next in list
  const oop next = reference_discovered(reference);
  reference_set_discovered(reference, NULL);
  return next;
}

oop* ZReferenceProcessor::keep(oop reference, ReferenceType type) {
  log_trace(gc, ref)("Enqueued Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));

  // Update statistics
  _enqueued_count.get()[type]++;

  // Make reference inactive
  make_inactive(reference, type);

  // Return next in list
  return reference_discovered_addr(reference);
}

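// Walk this worker's discovered list. References whose referents turn out to
// be alive, or that the application has cleared/enqueued, are dropped and
// unlinked, while the rest are made inactive and kept. The surviving
// references are then prepended, as one segment, onto the shared internal
// pending list with a single atomic exchange.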
void ZReferenceProcessor::work() {
  // Process discovered references
  oop* const list = _discovered_list.addr();
  oop* p = list;

  while (*p != NULL) {
    const oop reference = *p;
    const ReferenceType type = reference_type(reference);

    if (should_drop(reference, type)) {
      *p = drop(reference, type);
    } else {
      p = keep(reference, type);
    }
  }

  // Prepend discovered references to internal pending list
  if (*list != NULL) {
    *p = Atomic::xchg(_pending_list.addr(), *list);
    if (*p == NULL) {
      // First to prepend to list, record tail
      _pending_list_tail = p;
    }

    // Clear discovered list
    *list = NULL;
  }
}

bool ZReferenceProcessor::is_empty() const {
  ZPerWorkerConstIterator<oop> iter(&_discovered_list);
  for (const oop* list; iter.next(&list);) {
    if (*list != NULL) {
      return false;
    }
  }

  if (_pending_list.get() != NULL) {
    return false;
  }

  return true;
}

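// The Counters type is an array of per-ReferenceType counts, indexed from
// REF_SOFT through REF_PHANTOM. The per-worker counters are cleared by
// reset_statistics() and summed into ZStatReferences by collect_statistics().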
void ZReferenceProcessor::reset_statistics() {
  assert(is_empty(), "Should be empty");

  // Reset encountered
  ZPerWorkerIterator<Counters> iter_encountered(&_encountered_count);
  for (Counters* counters; iter_encountered.next(&counters);) {
    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
      (*counters)[i] = 0;
    }
  }

  // Reset discovered
  ZPerWorkerIterator<Counters> iter_discovered(&_discovered_count);
  for (Counters* counters; iter_discovered.next(&counters);) {
    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
      (*counters)[i] = 0;
    }
  }

  // Reset enqueued
  ZPerWorkerIterator<Counters> iter_enqueued(&_enqueued_count);
  for (Counters* counters; iter_enqueued.next(&counters);) {
    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
      (*counters)[i] = 0;
    }
  }
}

void ZReferenceProcessor::collect_statistics() {
  Counters encountered = {};
  Counters discovered = {};
  Counters enqueued = {};

  // Sum encountered
  ZPerWorkerConstIterator<Counters> iter_encountered(&_encountered_count);
  for (const Counters* counters; iter_encountered.next(&counters);) {
    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
      encountered[i] += (*counters)[i];
    }
  }

  // Sum discovered
  ZPerWorkerConstIterator<Counters> iter_discovered(&_discovered_count);
  for (const Counters* counters; iter_discovered.next(&counters);) {
    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
      discovered[i] += (*counters)[i];
    }
  }

  // Sum enqueued
  ZPerWorkerConstIterator<Counters> iter_enqueued(&_enqueued_count);
  for (const Counters* counters; iter_enqueued.next(&counters);) {
    for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
      enqueued[i] += (*counters)[i];
    }
  }

  // Update statistics
  ZStatReferences::set_soft(encountered[REF_SOFT], discovered[REF_SOFT], enqueued[REF_SOFT]);
  ZStatReferences::set_weak(encountered[REF_WEAK], discovered[REF_WEAK], enqueued[REF_WEAK]);
  ZStatReferences::set_final(encountered[REF_FINAL], discovered[REF_FINAL], enqueued[REF_FINAL]);
  ZStatReferences::set_phantom(encountered[REF_PHANTOM], discovered[REF_PHANTOM], enqueued[REF_PHANTOM]);

  // Trace statistics
  const ReferenceProcessorStats stats(discovered[REF_SOFT],
                                      discovered[REF_WEAK],
                                      discovered[REF_FINAL],
                                      discovered[REF_PHANTOM]);
  ZTracer::tracer()->report_gc_reference_stats(stats);
}

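// ZTask that runs the reference processing work on the GC worker threads.
// Each worker thread executing the task drains its own per-worker discovered
// list via ZReferenceProcessor::work().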
class ZReferenceProcessorTask : public ZTask {
private:
  ZReferenceProcessor* const _reference_processor;

public:
  ZReferenceProcessorTask(ZReferenceProcessor* reference_processor) :
      ZTask("ZReferenceProcessorTask"),
      _reference_processor(reference_processor) {}

  virtual void work() {
    _reference_processor->work();
  }
};

void ZReferenceProcessor::process_references() {
  ZStatTimer timer(ZSubPhaseConcurrentReferencesProcess);

  // Process discovered lists
  ZReferenceProcessorTask task(this);
  _workers->run_concurrent(&task);

  // Update SoftReference clock
  soft_reference_update_clock();

  // Collect, log and trace statistics
  collect_statistics();
}

void ZReferenceProcessor::enqueue_references() {
  ZStatTimer timer(ZSubPhaseConcurrentReferencesEnqueue);

  if (_pending_list.get() == NULL) {
    // Nothing to enqueue
    return;
  }

  {
    // Heap_lock protects external pending list
    MonitorLocker ml(Heap_lock);

    // Prepend internal pending list to external pending list
    *_pending_list_tail = Universe::swap_reference_pending_list(_pending_list.get());

    // Notify ReferenceHandler thread
    ml.notify_all();
  }

  // Reset internal pending list
  _pending_list.set(NULL);
  _pending_list_tail = _pending_list.addr();
}