18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/javaClasses.inline.hpp"
27 #include "classfile/systemDictionary.hpp"
28 #include "gc/shared/collectedHeap.hpp"
29 #include "gc/shared/collectedHeap.inline.hpp"
30 #include "gc/shared/gcTimer.hpp"
31 #include "gc/shared/gcTraceTime.inline.hpp"
32 #include "gc/shared/referencePolicy.hpp"
33 #include "gc/shared/referenceProcessor.inline.hpp"
34 #include "logging/log.hpp"
35 #include "memory/allocation.hpp"
36 #include "memory/resourceArea.hpp"
37 #include "oops/oop.inline.hpp"
38 #include "runtime/java.hpp"
39 #include "runtime/jniHandles.hpp"
40
// Statically allocated soft-reference clearing policies shared by all
// ReferenceProcessor instances; installed once via init_statics().
ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL;
// Timestamp clock (milliseconds) used to age SoftReferences; kept in sync
// with the clock field of java.lang.ref.SoftReference (see init_statics()).
jlong ReferenceProcessor::_soft_ref_timestamp_clock = 0;
44
// VM-startup hook: initializes ReferenceProcessor static state (the soft
// reference timestamp clock and, per init_statics(), the shared policies).
void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}
48
49 void ReferenceProcessor::init_statics() {
50 // We need a monotonically non-decreasing time in ms but
51 // os::javaTimeMillis() does not guarantee monotonicity.
52 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
53
54 // Initialize the soft ref timestamp clock.
55 _soft_ref_timestamp_clock = now;
56 // Also update the soft ref clock in j.l.r.SoftReference
57 java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock);
309 // in all places in the reference processor where we manipulate the discovered
310 // field we make sure to do the barrier here where we anyway iterate through
311 // all linked Reference objects. Note that it is important to not dirty any
312 // cards during reference processing since this will cause card table
313 // verification to fail for G1.
314 log_develop_trace(gc, ref)("ReferenceProcessor::enqueue_discovered_reflist list " INTPTR_FORMAT, p2i(&refs_list));
315
316 oop obj = NULL;
317 oop next_d = refs_list.head();
318 // Walk down the list, self-looping the next field
319 // so that the References are not considered active.
320 while (obj != next_d) {
321 obj = next_d;
322 assert(obj->is_instance(), "should be an instance object");
323 assert(InstanceKlass::cast(obj->klass())->is_reference_instance_klass(), "should be reference object");
324 next_d = java_lang_ref_Reference::discovered(obj);
325 log_develop_trace(gc, ref)(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT, p2i(obj), p2i(next_d));
326 assert(java_lang_ref_Reference::next(obj) == NULL,
327 "Reference not active; should not be discovered");
328 // Self-loop next, so as to make Ref not active.
329 java_lang_ref_Reference::set_next_raw(obj, obj);
330 if (next_d != obj) {
331 oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d);
332 } else {
333 // This is the last object.
334 // Swap refs_list into pending list and set obj's
335 // discovered to what we read from the pending list.
336 oop old = Universe::swap_reference_pending_list(refs_list.head());
337 java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
338 oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
339 }
340 }
341 }
342
343 // Parallel enqueue task
344 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
345 public:
346 RefProcEnqueueTask(ReferenceProcessor& ref_processor,
347 DiscoveredList discovered_refs[],
348 int n_queues)
349 : EnqueueTask(ref_processor, discovered_refs, n_queues)
350 { }
351
352 virtual void work(unsigned int work_id) {
353 assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
354 // Simplest first cut: static partitioning.
355 int index = work_id;
356 // The increment on "index" must correspond to the maximum number of queues
357 // (n_queues) with which that ReferenceProcessor was created. That
358 // is because of the "clever" way the discovered references lists were
// Caches the discovered/referent field addresses and values of the current
// Reference (_ref) in the iterator, sanity-checking each load.
// In debug builds, allow_null_referent relaxes the referent check: with
// non-atomic (concurrent) discovery a Reference may be observed with a
// NULL referent (see the note in the #ifndef PRODUCT section below).
void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  oop discovered = java_lang_ref_Reference::discovered(_ref);
  assert(_discovered_addr && discovered->is_oop_or_null(),
         "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
  // The discovered field links to the next Reference on this discovered list.
  _next = discovered;
  _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
  _referent = java_lang_ref_Reference::referent(_ref);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             _referent->is_oop_or_null()
           : _referent->is_oop(),
         "Expected an oop%s for referent field at " PTR_FORMAT,
         (allow_null_referent ? " or NULL" : ""),
         p2i(_referent));
}
404
// Unlinks the current Reference (_ref) from the discovered list by
// clearing its discovered field and splicing _prev_next past it.
// Stores are raw; see the G1 note below on why no pre-barrier is needed.
void DiscoveredListIterator::remove() {
  assert(_ref->is_oop(), "Dropping a bad reference");
  // Detach _ref: clear its own discovered link.
  oop_store_raw(_discovered_addr, NULL);

  // First _prev_next ref actually points into DiscoveredList (gross).
  oop new_next;
  if (_next == _ref) {
    // _ref's discovered field is self-looped, i.e. _ref is the last element.
    // At the end of the list, we should make _prev point to itself.
    // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
    // and _prev will be NULL.
    new_next = _prev;
  } else {
    new_next = _next;
  }
  // Remove Reference object from discovered list. Note that G1 does not need a
  // pre-barrier here because we know the Reference has already been found/marked,
  // that's how it ended up in the discovered list in the first place.
  oop_store_raw(_prev_next, new_next);
  NOT_PRODUCT(_removed++);   // debug-only statistics counter
  _refs_list.dec_length(1);
}
426
// Clears the referent field of the current Reference with a raw NULL store.
void DiscoveredListIterator::clear_referent() {
  oop_store_raw(_referent_addr, NULL);
}
430
431 // NOTE: process_phase*() are largely similar, and at a high level
432 // merely iterate over the extant list applying a predicate to
433 // each of its elements and possibly removing that element from the
434 // list and applying some further closures to that element.
435 // We should consider the possibility of replacing these
436 // process_phase*() methods by abstracting them into
437 // a single general iterator invocation that receives appropriate
438 // closures that accomplish this work.
439
440 // (SoftReferences only) Traverse the list and remove any SoftReferences whose
441 // referents are not alive, but that should be kept alive for policy reasons.
442 // Keep alive the transitive closure of all such referents.
443 void
444 ReferenceProcessor::process_phase1(DiscoveredList& refs_list,
445 ReferencePolicy* policy,
446 BoolObjectClosure* is_alive,
447 OopClosure* keep_alive,
448 VoidClosure* complete_gc) {
874 case REF_NONE:
875 // we should not reach here if we are an InstanceRefKlass
876 default:
877 ShouldNotReachHere();
878 }
879 log_develop_trace(gc, ref)("Thread %d gets list " INTPTR_FORMAT, id, p2i(list));
880 return list;
881 }
882
// Multi-threaded discovery: atomically claim obj for this worker's
// discovered list by CASing a non-NULL value into its discovered field.
// Each worker owns its own refs_list, so after a successful CAS the list
// update itself needs no further synchronization.
inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop obj,
                                              HeapWord* discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a non null
  // discovered_addr.
  oop current_head = refs_list.head();
  // The last ref must have its discovered field pointing to itself.
  oop next_discovered = (current_head != NULL) ? current_head : obj;

  // CAS succeeds (returns NULL) only if the discovered field was still NULL,
  // i.e. no other worker has claimed this Reference yet.
  oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
                                                    NULL);
  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing, so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);

    log_develop_trace(gc, ref)("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
  } else {
    // If retest was non NULL, another thread beat us to it:
    // The reference has already been discovered...
    log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
  }
}
912
913 #ifndef PRODUCT
914 // Non-atomic (i.e. concurrent) discovery might allow us
915 // to observe j.l.References with NULL referents, being those
1033 assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
1034 _span.contains(obj_addr), "code inconsistency");
1035 }
1036
1037 // Get the right type of discovered queue head.
1038 DiscoveredList* list = get_discovered_list(rt);
1039 if (list == NULL) {
1040 return false; // nothing special needs to be done
1041 }
1042
1043 if (_discovery_is_mt) {
1044 add_to_discovered_list_mt(*list, obj, discovered_addr);
1045 } else {
1046 // We do a raw store here: the field will be visited later when processing
1047 // the discovered references.
1048 oop current_head = list->head();
1049 // The last ref must have its discovered field pointing to itself.
1050 oop next_discovered = (current_head != NULL) ? current_head : obj;
1051
1052 assert(discovered == NULL, "control point invariant");
1053 oop_store_raw(discovered_addr, next_discovered);
1054 list->set_head(obj);
1055 list->inc_length(1);
1056
1057 log_develop_trace(gc, ref)("Discovered reference (" INTPTR_FORMAT ": %s)", p2i(obj), obj->klass()->internal_name());
1058 }
1059 assert(obj->is_oop(), "Discovered a bad reference");
1060 verify_referent(obj);
1061 return true;
1062 }
1063
1064 bool ReferenceProcessor::has_discovered_references() {
1065 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
1066 if (!_discovered_refs[i].is_empty()) {
1067 return true;
1068 }
1069 }
1070 return false;
1071 }
1072
1073 // Preclean the discovered references by removing those
1179 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT " Refs in discovered list " INTPTR_FORMAT,
1180 iter.removed(), iter.processed(), p2i(&refs_list));
1181 }
1182 )
1183 }
1184
1185 const char* ReferenceProcessor::list_name(uint i) {
1186 assert(i <= _max_num_q * number_of_subclasses_of_ref(),
1187 "Out of bounds index");
1188
1189 int j = i / _max_num_q;
1190 switch (j) {
1191 case 0: return "SoftRef";
1192 case 1: return "WeakRef";
1193 case 2: return "FinalRef";
1194 case 3: return "PhantomRef";
1195 }
1196 ShouldNotReachHere();
1197 return NULL;
1198 }
1199
|
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/javaClasses.inline.hpp"
27 #include "classfile/systemDictionary.hpp"
28 #include "gc/shared/collectedHeap.hpp"
29 #include "gc/shared/collectedHeap.inline.hpp"
30 #include "gc/shared/gcTimer.hpp"
31 #include "gc/shared/gcTraceTime.inline.hpp"
32 #include "gc/shared/referencePolicy.hpp"
33 #include "gc/shared/referenceProcessor.inline.hpp"
34 #include "logging/log.hpp"
35 #include "memory/allocation.hpp"
36 #include "memory/resourceArea.hpp"
37 #include "oops/oop.inline.hpp"
38 #include "runtime/access.inline.hpp"
39 #include "runtime/java.hpp"
40 #include "runtime/jniHandles.hpp"
41
// Statically allocated soft-reference clearing policies shared by all
// ReferenceProcessor instances; installed once via init_statics().
ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL;
// Timestamp clock (milliseconds) used to age SoftReferences; kept in sync
// with the clock field of java.lang.ref.SoftReference (see init_statics()).
jlong ReferenceProcessor::_soft_ref_timestamp_clock = 0;
45
// VM-startup hook: initializes ReferenceProcessor static state (the soft
// reference timestamp clock and, per init_statics(), the shared policies).
void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}
49
50 void ReferenceProcessor::init_statics() {
51 // We need a monotonically non-decreasing time in ms but
52 // os::javaTimeMillis() does not guarantee monotonicity.
53 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
54
55 // Initialize the soft ref timestamp clock.
56 _soft_ref_timestamp_clock = now;
57 // Also update the soft ref clock in j.l.r.SoftReference
58 java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock);
310 // in all places in the reference processor where we manipulate the discovered
311 // field we make sure to do the barrier here where we anyway iterate through
312 // all linked Reference objects. Note that it is important to not dirty any
313 // cards during reference processing since this will cause card table
314 // verification to fail for G1.
315 log_develop_trace(gc, ref)("ReferenceProcessor::enqueue_discovered_reflist list " INTPTR_FORMAT, p2i(&refs_list));
316
317 oop obj = NULL;
318 oop next_d = refs_list.head();
319 // Walk down the list, self-looping the next field
320 // so that the References are not considered active.
321 while (obj != next_d) {
322 obj = next_d;
323 assert(obj->is_instance(), "should be an instance object");
324 assert(InstanceKlass::cast(obj->klass())->is_reference_instance_klass(), "should be reference object");
325 next_d = java_lang_ref_Reference::discovered(obj);
326 log_develop_trace(gc, ref)(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT, p2i(obj), p2i(next_d));
327 assert(java_lang_ref_Reference::next(obj) == NULL,
328 "Reference not active; should not be discovered");
329 // Self-loop next, so as to make Ref not active.
330 HeapAccess<>::oop_store_at(obj, java_lang_ref_Reference::next_offset, obj);
331 if (next_d != obj) {
332 HeapAccess<VALUE_NOT_NULL>::oop_store_at(obj, java_lang_ref_Reference::discovered_offset, next_d);
333 } else {
334 // This is the last object.
335 // Swap refs_list into pending list and set obj's
336 // discovered to what we read from the pending list.
337 oop old = Universe::swap_reference_pending_list(refs_list.head());
338 HeapAccess<>::oop_store_at(obj, java_lang_ref_Reference::discovered_offset, old);
339 }
340 }
341 }
342
343 // Parallel enqueue task
344 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
345 public:
346 RefProcEnqueueTask(ReferenceProcessor& ref_processor,
347 DiscoveredList discovered_refs[],
348 int n_queues)
349 : EnqueueTask(ref_processor, discovered_refs, n_queues)
350 { }
351
352 virtual void work(unsigned int work_id) {
353 assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
354 // Simplest first cut: static partitioning.
355 int index = work_id;
356 // The increment on "index" must correspond to the maximum number of queues
357 // (n_queues) with which that ReferenceProcessor was created. That
358 // is because of the "clever" way the discovered references lists were
// Caches the discovered/referent field addresses and values of the current
// Reference (_ref) in the iterator, sanity-checking each load.
// In debug builds, allow_null_referent relaxes the referent check: with
// non-atomic (concurrent) discovery a Reference may be observed with a
// NULL referent (see the note in the #ifndef PRODUCT section below).
void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  oop discovered = java_lang_ref_Reference::discovered(_ref);
  assert(_discovered_addr && discovered->is_oop_or_null(),
         "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
  // The discovered field links to the next Reference on this discovered list.
  _next = discovered;
  _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
  _referent = java_lang_ref_Reference::referent(_ref);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             _referent->is_oop_or_null()
           : _referent->is_oop(),
         "Expected an oop%s for referent field at " PTR_FORMAT,
         (allow_null_referent ? " or NULL" : ""),
         p2i(_referent));
}
404
// Unlinks the current Reference (_ref) from the discovered list by
// clearing its discovered field and splicing _prev_next past it.
// Stores go through the Access API (BasicAccess); see the G1 note below
// on why no pre-barrier is needed.
void DiscoveredListIterator::remove() {
  assert(_ref->is_oop(), "Dropping a bad reference");
  // Detach _ref: clear its own discovered link.
  BasicAccess<>::oop_store(_discovered_addr, oop(NULL));

  // First _prev_next ref actually points into DiscoveredList (gross).
  oop new_next;
  if (_next == _ref) {
    // _ref's discovered field is self-looped, i.e. _ref is the last element.
    // At the end of the list, we should make _prev point to itself.
    // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
    // and _prev will be NULL.
    new_next = _prev;
  } else {
    new_next = _next;
  }
  // Remove Reference object from discovered list. Note that G1 does not need a
  // pre-barrier here because we know the Reference has already been found/marked,
  // that's how it ended up in the discovered list in the first place.
  BasicAccess<>::oop_store(_prev_next, new_next);
  NOT_PRODUCT(_removed++);   // debug-only statistics counter
  _refs_list.dec_length(1);
}
426
// Clears the referent field of the current Reference (NULL store through
// the Access API).
void DiscoveredListIterator::clear_referent() {
  BasicAccess<>::oop_store(_referent_addr, oop(NULL));
}
430
431 // NOTE: process_phase*() are largely similar, and at a high level
432 // merely iterate over the extant list applying a predicate to
433 // each of its elements and possibly removing that element from the
434 // list and applying some further closures to that element.
435 // We should consider the possibility of replacing these
436 // process_phase*() methods by abstracting them into
437 // a single general iterator invocation that receives appropriate
438 // closures that accomplish this work.
439
440 // (SoftReferences only) Traverse the list and remove any SoftReferences whose
441 // referents are not alive, but that should be kept alive for policy reasons.
442 // Keep alive the transitive closure of all such referents.
443 void
444 ReferenceProcessor::process_phase1(DiscoveredList& refs_list,
445 ReferencePolicy* policy,
446 BoolObjectClosure* is_alive,
447 OopClosure* keep_alive,
448 VoidClosure* complete_gc) {
874 case REF_NONE:
875 // we should not reach here if we are an InstanceRefKlass
876 default:
877 ShouldNotReachHere();
878 }
879 log_develop_trace(gc, ref)("Thread %d gets list " INTPTR_FORMAT, id, p2i(list));
880 return list;
881 }
882
// Multi-threaded discovery: atomically claim obj for this worker's
// discovered list by CASing a non-NULL value into its discovered field.
// Each worker owns its own refs_list, so after a successful CAS the list
// update itself needs no further synchronization.
inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop obj,
                                              HeapWord* discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a non null
  // discovered_addr.
  oop current_head = refs_list.head();
  // The last ref must have its discovered field pointing to itself.
  oop next_discovered = (current_head != NULL) ? current_head : obj;

  // Decorated CAS via the Access API; succeeds (returns NULL) only if the
  // discovered field was still NULL, i.e. no other worker claimed it first.
  oop retest = HeapAccess<MO_SEQ_CST | ACCESS_WEAK>::oop_cas(next_discovered, discovered_addr, oop(NULL));

  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing, so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);

    log_develop_trace(gc, ref)("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
  } else {
    // If retest was non NULL, another thread beat us to it:
    // The reference has already been discovered...
    log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
  }
}
912
913 #ifndef PRODUCT
914 // Non-atomic (i.e. concurrent) discovery might allow us
915 // to observe j.l.References with NULL referents, being those
1033 assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
1034 _span.contains(obj_addr), "code inconsistency");
1035 }
1036
1037 // Get the right type of discovered queue head.
1038 DiscoveredList* list = get_discovered_list(rt);
1039 if (list == NULL) {
1040 return false; // nothing special needs to be done
1041 }
1042
1043 if (_discovery_is_mt) {
1044 add_to_discovered_list_mt(*list, obj, discovered_addr);
1045 } else {
1046 // We do a raw store here: the field will be visited later when processing
1047 // the discovered references.
1048 oop current_head = list->head();
1049 // The last ref must have its discovered field pointing to itself.
1050 oop next_discovered = (current_head != NULL) ? current_head : obj;
1051
1052 assert(discovered == NULL, "control point invariant");
1053 BasicAccess<>::oop_store(discovered_addr, next_discovered);
1054 list->set_head(obj);
1055 list->inc_length(1);
1056
1057 log_develop_trace(gc, ref)("Discovered reference (" INTPTR_FORMAT ": %s)", p2i(obj), obj->klass()->internal_name());
1058 }
1059 assert(obj->is_oop(), "Discovered a bad reference");
1060 verify_referent(obj);
1061 return true;
1062 }
1063
1064 bool ReferenceProcessor::has_discovered_references() {
1065 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
1066 if (!_discovered_refs[i].is_empty()) {
1067 return true;
1068 }
1069 }
1070 return false;
1071 }
1072
1073 // Preclean the discovered references by removing those
1179 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT " Refs in discovered list " INTPTR_FORMAT,
1180 iter.removed(), iter.processed(), p2i(&refs_list));
1181 }
1182 )
1183 }
1184
1185 const char* ReferenceProcessor::list_name(uint i) {
1186 assert(i <= _max_num_q * number_of_subclasses_of_ref(),
1187 "Out of bounds index");
1188
1189 int j = i / _max_num_q;
1190 switch (j) {
1191 case 0: return "SoftRef";
1192 case 1: return "WeakRef";
1193 case 2: return "FinalRef";
1194 case 3: return "PhantomRef";
1195 }
1196 ShouldNotReachHere();
1197 return NULL;
1198 }
|