src/share/vm/memory/referenceProcessor.cpp
*** old/src/share/vm/memory/referenceProcessor.cpp	Wed Jan 15 01:42:06 2014
--- new/src/share/vm/memory/referenceProcessor.cpp	Wed Jan 15 01:42:06 2014

*** 43,53 **** --- 43,53 ----
  void referenceProcessor_init() {
    ReferenceProcessor::init_statics();
  }

  void ReferenceProcessor::init_statics() {
!   // We need a monotonically non-deccreasing time in ms but
!   // We need a monotonically non-decreasing time in ms but
    // os::javaTimeMillis() does not guarantee monotonicity.
    jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;

    // Initialize the soft ref timestamp clock.
    _soft_ref_timestamp_clock = now;
*** 155,165 **** --- 155,165 ----
  void ReferenceProcessor::update_soft_ref_master_clock() {
    // Update (advance) the soft ref master clock field. This must be done
    // after processing the soft ref list.
!   // We need a monotonically non-deccreasing time in ms but
!   // We need a monotonically non-decreasing time in ms but
    // os::javaTimeMillis() does not guarantee monotonicity.
    jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
    jlong soft_ref_clock = java_lang_ref_SoftReference::clock();
    assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync");
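The two hunks above exist because os::javaTimeMillis() tracks wall-clock time and may jump backwards, so the soft-ref clock is instead derived from os::javaTimeNanos(), which is monotonic when the platform provides a monotonic source. A minimal standalone sketch of the same pattern (not HotSpot code; std::chrono::steady_clock stands in for os::javaTimeNanos()):

    #include <chrono>
    #include <cstdint>

    // Derive a non-decreasing millisecond value from a monotonic nanosecond
    // source instead of reading a wall clock that may step backwards.
    static int64_t monotonic_millis() {
      using namespace std::chrono;
      return duration_cast<milliseconds>(
          steady_clock::now().time_since_epoch()).count();
    }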
*** 171,181 **** --- 171,181 ----
    )
    // The values of now and _soft_ref_timestamp_clock are set using
    // javaTimeNanos(), which is guaranteed to be monotonically
    // non-decreasing provided the underlying platform provides such
    // a time source (and it is bug free).
!   // In product mode, however, protect ourselves from non-monotonicty.
!   // In product mode, however, protect ourselves from non-monotonicity.
    if (now > _soft_ref_timestamp_clock) {
      _soft_ref_timestamp_clock = now;
      java_lang_ref_SoftReference::set_clock(now);
    }
    // Else leave clock stalled at its old value until time progresses
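In product builds the code above additionally refuses to move the published clock backwards even if the time source misbehaves. A standalone sketch of that guard, with illustrative names (not the actual ReferenceProcessor fields):

    #include <cstdint>

    struct SoftRefClockSketch {
      int64_t _timestamp_ms = 0;   // hypothetical stand-in for _soft_ref_timestamp_clock

      void update(int64_t now_ms) {
        if (now_ms > _timestamp_ms) {
          _timestamp_ms = now_ms;  // advance only when time has actually progressed
        }
        // else: leave the clock stalled at its old value, as the product-mode
        // guard above does, until time moves past it again
      }
    };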
*** 356,366 **** --- 356,366 ----
               INTPTR_FORMAT, (address)refs_list.head());
    }

    oop obj = NULL;
    oop next_d = refs_list.head();
!   if (pending_list_uses_discovered_field()) { // New behaviour
!   if (pending_list_uses_discovered_field()) { // New behavior
      // Walk down the list, self-looping the next field
      // so that the References are not considered active.
      while (obj != next_d) {
        obj = next_d;
        assert(obj->is_instanceRef(), "should be reference object");
*** 372,391 **** --- 372,391 ----
        assert(java_lang_ref_Reference::next(obj) == NULL,
               "Reference not active; should not be discovered");
        // Self-loop next, so as to make Ref not active.
        java_lang_ref_Reference::set_next(obj, obj);
        if (next_d == obj) {  // obj is last
!         // Swap refs_list into pendling_list_addr and
!         // Swap refs_list into pending_list_addr and
          // set obj's discovered to what we read from pending_list_addr.
          oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
          // Need oop_check on pending_list_addr above;
          // see special oop-check code at the end of
          // enqueue_discovered_reflists() further below.
          java_lang_ref_Reference::set_discovered(obj, old); // old may be NULL
        }
      }
!   } else { // Old behaviour
!   } else { // Old behavior
      // Walk down the list, copying the discovered field into
      // the next field and clearing the discovered field.
      while (obj != next_d) {
        obj = next_d;
        assert(obj->is_instanceRef(), "should be reference object");
*** 395,405 **** --- 395,405 ----
                               (void *)obj, (void *)next_d);
      }
      assert(java_lang_ref_Reference::next(obj) == NULL,
             "The reference should not be enqueued");
      if (next_d == obj) {  // obj is last
!       // Swap refs_list into pendling_list_addr and
!       // Swap refs_list into pending_list_addr and
        // set obj's next to what we read from pending_list_addr.
        oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
        // Need oop_check on pending_list_addr above;
        // see special oop-check code at the end of
        // enqueue_discovered_reflists() further below.
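Both branches above publish a privately built discovered list onto the shared pending list with a single atomic exchange of the list head, then hang the previous head off the last element (via the discovered field in the new behavior, the next field in the old one). A simplified standalone sketch of that splice, with illustrative types rather than oop and atomic_exchange_oop:

    #include <atomic>

    struct Node { Node* link = nullptr; };       // stand-in for a Reference and its link field

    std::atomic<Node*> g_pending_head{nullptr};  // stand-in for *pending_list_addr

    void splice_onto_pending(Node* local_head, Node* local_tail) {
      // Analogue of oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr):
      // publish the local list and learn what the pending list previously held.
      Node* old_head = g_pending_head.exchange(local_head);
      local_tail->link = old_head;               // old_head may be null, like 'old' above
    }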
*** 1349,1359 **** --- 1349,1359 ----
  // Walk the given discovered ref list, and remove all reference objects
  // whose referents are still alive, whose referents are NULL or which
  // are not active (have a non-NULL next field). NOTE: When we are
  // thus precleaning the ref lists (which happens single-threaded today),
! // we do not disable refs discovery to honour the correct semantics of
! // we do not disable refs discovery to honor the correct semantics of
  // java.lang.Reference. As a result, we need to be careful below
  // that ref removal steps interleave safely with ref discovery steps
  // (in this thread).
  void ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list,
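The comment fixed above describes precleaning: walking a discovered list and unlinking references that no longer need processing (referent alive, referent NULL, or reference already inactive) while discovery stays enabled. A simplified single-threaded sketch of that filtering walk, with illustrative fields; it does not model the discovery interleaving the real code must tolerate:

    struct RefSketch {
      RefSketch* discovered = nullptr;   // list link, like the discovered field
      bool referent_alive = false;       // stand-in for the is_alive closure's answer
      bool referent_null  = false;
      bool active         = true;        // inactive == non-NULL next field
    };

    RefSketch* preclean(RefSketch* head) {
      RefSketch** link = &head;
      while (*link != nullptr) {
        RefSketch* r = *link;
        if (r->referent_alive || r->referent_null || !r->active) {
          *link = r->discovered;         // unlink: nothing left to do for this reference
          r->discovered = nullptr;
        } else {
          link = &r->discovered;         // keep it on the list and advance
        }
      }
      return head;
    }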
