< prev index next >

src/hotspot/share/gc/shared/referenceProcessor.cpp

Print this page




1014     case REF_NONE:
1015       // we should not reach here if we are an InstanceRefKlass
1016     default:
1017       ShouldNotReachHere();
1018   }
1019   log_develop_trace(gc, ref)("Thread %d gets list " INTPTR_FORMAT, id, p2i(list));
1020   return list;
1021 }
1022 
// Pre-change version: MT reference discovery. Atomically claims obj's
// "discovered" field (CAS expecting NULL) so that exactly one GC worker
// prepends obj to its per-thread refs_list; the losing thread only logs.
1023 inline void
1024 ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
1025                                               oop             obj,
1026                                               HeapWord*       discovered_addr) {
1027   assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
1028   // First we must make sure this object is only enqueued once. CAS in a non null
1029   // discovered_addr.
1030   oop current_head = refs_list.head();
1031   // The last ref must have its discovered field pointing to itself.
1032   oop next_discovered = (current_head != NULL) ? current_head : obj;
1033 
     // Old Access API argument order: (new_value, addr, compare_value).
     // Success means the old value read back (retest) equals the compare
     // value oop(NULL). AS_NO_KEEPALIVE: raw access without a keep-alive
     // barrier — presumably so discovery does not resurrect the referent
     // during concurrent collection; confirm against access.hpp decorators.
1034   oop retest = HeapAccess<AS_NO_KEEPALIVE>::oop_atomic_cmpxchg(next_discovered, discovered_addr, oop(NULL));
1035 
1036   if (retest == NULL) {
1037     // This thread just won the right to enqueue the object.
1038     // We have separate lists for enqueueing, so no synchronization
1039     // is necessary.
1040     refs_list.set_head(obj);
1041     refs_list.inc_length(1);
1042 
1043     log_develop_trace(gc, ref)("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
1044                                p2i(obj), obj->klass()->internal_name());
1045   } else {
1046     // If retest was non NULL, another thread beat us to it:
1047     // The reference has already been discovered...
1048     log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)",
1049                                p2i(obj), obj->klass()->internal_name());
1050   }
1051 }
1052 
1053 #ifndef PRODUCT
1054 // Non-atomic (i.e. concurrent) discovery might allow us




1014     case REF_NONE:
1015       // we should not reach here if we are an InstanceRefKlass
1016     default:
1017       ShouldNotReachHere();
1018   }
1019   log_develop_trace(gc, ref)("Thread %d gets list " INTPTR_FORMAT, id, p2i(list));
1020   return list;
1021 }
1022 
// Post-change version: identical logic to the old side of this diff; only
// the oop_atomic_cmpxchg argument order changed, now (addr, compare_value,
// new_value) — matching the conventional Atomic::cmpxchg ordering.
1023 inline void
1024 ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
1025                                               oop             obj,
1026                                               HeapWord*       discovered_addr) {
1027   assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
1028   // First we must make sure this object is only enqueued once. CAS in a non null
1029   // discovered_addr.
1030   oop current_head = refs_list.head();
1031   // The last ref must have its discovered field pointing to itself.
1032   oop next_discovered = (current_head != NULL) ? current_head : obj;
1033 
     // CAS obj's discovered field from NULL to next_discovered. The returned
     // old value (retest) is NULL exactly when this thread won the race.
     // AS_NO_KEEPALIVE: access without a keep-alive barrier — presumably to
     // avoid reviving the referent during discovery; confirm in access.hpp.
1034   oop retest = HeapAccess<AS_NO_KEEPALIVE>::oop_atomic_cmpxchg(discovered_addr, oop(NULL), next_discovered);
1035 
1036   if (retest == NULL) {
1037     // This thread just won the right to enqueue the object.
1038     // We have separate lists for enqueueing, so no synchronization
1039     // is necessary.
1040     refs_list.set_head(obj);
1041     refs_list.inc_length(1);
1042 
1043     log_develop_trace(gc, ref)("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
1044                                p2i(obj), obj->klass()->internal_name());
1045   } else {
1046     // If retest was non NULL, another thread beat us to it:
1047     // The reference has already been discovered...
1048     log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)",
1049                                p2i(obj), obj->klass()->internal_name());
1050   }
1051 }
1052 
1053 #ifndef PRODUCT
1054 // Non-atomic (i.e. concurrent) discovery might allow us


< prev index next >