20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/javaClasses.hpp"
27 #include "classfile/systemDictionary.hpp"
28 #include "gc_implementation/shared/gcTimer.hpp"
29 #include "gc_implementation/shared/gcTraceTime.hpp"
30 #include "gc_interface/collectedHeap.hpp"
31 #include "gc_interface/collectedHeap.inline.hpp"
32 #include "memory/referencePolicy.hpp"
33 #include "memory/referenceProcessor.hpp"
34 #include "oops/oop.inline.hpp"
35 #include "runtime/java.hpp"
36 #include "runtime/jniHandles.hpp"
37
// Soft-reference clearing policies; allocated once in init_statics().
ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL;
// True if this JDK chains the pending list through the "discovered"
// field rather than the "next" field; set from JDK_Version in init_statics().
bool ReferenceProcessor::_pending_list_uses_discovered_field = false;
// Soft-ref timestamp clock (ms); mirrored into j.l.r.SoftReference's
// static clock field (see init_statics() / enable_discovery()).
jlong ReferenceProcessor::_soft_ref_timestamp_clock = 0;
42
// VM-startup hook: performs the one-time static initialization of the
// reference processing subsystem.
void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}
46
// One-time static initialization, called from referenceProcessor_init():
// seeds the soft-ref timestamp clock (both here and in j.l.r.SoftReference),
// allocates the soft-ref clearing policies, and validates RefDiscoveryPolicy.
void ReferenceProcessor::init_statics() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;

  // Initialize the soft ref timestamp clock.
  _soft_ref_timestamp_clock = now;
  // Also update the soft ref clock in j.l.r.SoftReference
  java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock);

  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  // Default policy depends on the compiler configuration:
  // LRUMaxHeapPolicy when C2 is present, LRUCurrentHeapPolicy otherwise.
  _default_soft_ref_policy = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
                                 NOT_COMPILER2(LRUCurrentHeapPolicy());
  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    vm_exit_during_initialization("Could not allocate reference policy object");
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
  // Cache whether the running JDK chains the pending list through the
  // "discovered" field (post-4956777) or the "next" field (older JDKs).
  _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field();
}
68
69 void ReferenceProcessor::enable_discovery(bool check_no_refs) {
70 #ifdef ASSERT
71 // Verify that we're not currently discovering refs
72 assert(!_discovering_refs, "nested call?");
73
74 if (check_no_refs) {
75 // Verify that the discovered lists are empty
76 verify_no_references_recorded();
77 }
78 #endif // ASSERT
79
80 // Someone could have modified the value of the static
81 // field in the j.l.r.SoftReference class that holds the
82 // soft reference timestamp clock using reflection or
83 // Unsafe between GCs. Unconditionally update the static
84 // field in ReferenceProcessor here so that we use the new
85 // value during reference discovery.
86
336 return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
337 } else {
338 return enqueue_discovered_ref_helper<oop>(this, task_executor);
339 }
340 }
341
// Splice the References on refs_list onto the VM-global pending list
// rooted at pending_list_addr, deactivating each Reference (by
// self-looping its "next" field) as it is walked. The last Reference is
// linked to whatever a Java thread may have put on the pending list
// concurrently, via an atomic exchange on pending_list_addr.
void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
                                                    HeapWord* pending_list_addr) {
  // Given a list of refs linked through the "discovered" field
  // (java.lang.ref.Reference.discovered), self-loop their "next" field
  // thus distinguishing them from active References, then
  // prepend them to the pending list.
  //
  // The Java threads will see the Reference objects linked together through
  // the discovered field. Instead of trying to do the write barrier updates
  // in all places in the reference processor where we manipulate the discovered
  // field we make sure to do the barrier here where we anyway iterate through
  // all linked Reference objects. Note that it is important to not dirty any
  // cards during reference processing since this will cause card table
  // verification to fail for G1.
  //
  // BKWRD COMPATIBILITY NOTE: For older JDKs (prior to the fix for 4956777),
  // the "next" field is used to chain the pending list, not the discovered
  // field.
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
                           INTPTR_FORMAT, p2i(refs_list.head()));
  }

  oop obj = NULL;
  oop next_d = refs_list.head();
  if (pending_list_uses_discovered_field()) { // New behavior
    // Walk down the list, self-looping the next field
    // so that the References are not considered active.
    while (obj != next_d) {
      obj = next_d;
      assert(obj->is_instanceRef(), "should be reference object");
      next_d = java_lang_ref_Reference::discovered(obj);
      if (TraceReferenceGC && PrintGCDetails) {
        gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
                               p2i(obj), p2i(next_d));
      }
      assert(java_lang_ref_Reference::next(obj) == NULL,
             "Reference not active; should not be discovered");
      // Self-loop next, so as to make Ref not active.
      java_lang_ref_Reference::set_next_raw(obj, obj);
      if (next_d != obj) {
        // Interior node: the discovered link is already correct; just run
        // the write barrier for it (see block comment above).
        oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d);
      } else {
        // This is the last object.
        // Swap refs_list into pending_list_addr and
        // set obj's discovered to what we read from pending_list_addr.
        oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
        // Need post-barrier on pending_list_addr. See enqueue_discovered_ref_helper() above.
        java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
        oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
      }
    }
  } else { // Old behavior
    // Walk down the list, copying the discovered field into
    // the next field and clearing the discovered field.
    while (obj != next_d) {
      obj = next_d;
      assert(obj->is_instanceRef(), "should be reference object");
      next_d = java_lang_ref_Reference::discovered(obj);
      if (TraceReferenceGC && PrintGCDetails) {
        gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
                               p2i(obj), p2i(next_d));
      }
      assert(java_lang_ref_Reference::next(obj) == NULL,
             "The reference should not be enqueued");
      if (next_d == obj) { // obj is last
        // Swap refs_list into pending_list_addr and
        // set obj's next to what we read from pending_list_addr.
        oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
        // Need oop_check on pending_list_addr above;
        // see special oop-check code at the end of
        // enqueue_discovered_reflists() further below.
        if (old == NULL) {
          // obj should be made to point to itself, since
          // pending list was empty.
          java_lang_ref_Reference::set_next(obj, obj);
        } else {
          java_lang_ref_Reference::set_next(obj, old);
        }
      } else {
        // Interior node: chain the pending list through "next".
        java_lang_ref_Reference::set_next(obj, next_d);
      }
      // Old behavior clears "discovered" as it goes (it is not the chain).
      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
    }
  }
}
428
429 // Parallel enqueue task
430 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
431 public:
432 RefProcEnqueueTask(ReferenceProcessor& ref_processor,
433 DiscoveredList discovered_refs[],
434 HeapWord* pending_list_addr,
435 int n_queues)
436 : EnqueueTask(ref_processor, discovered_refs,
437 pending_list_addr, n_queues)
438 { }
439
440 virtual void work(unsigned int work_id) {
441 assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
442 // Simplest first cut: static partitioning.
443 int index = work_id;
444 // The increment on "index" must correspond to the maximum number of queues
445 // (n_queues) with which that ReferenceProcessor was created. That
446 // is because of the "clever" way the discovered references lists were
498 oop_store_raw(_discovered_addr, NULL);
499
500 // First _prev_next ref actually points into DiscoveredList (gross).
501 oop new_next;
502 if (_next == _ref) {
503 // At the end of the list, we should make _prev point to itself.
504 // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
505 // and _prev will be NULL.
506 new_next = _prev;
507 } else {
508 new_next = _next;
509 }
510 // Remove Reference object from discovered list. Note that G1 does not need a
511 // pre-barrier here because we know the Reference has already been found/marked,
512 // that's how it ended up in the discovered list in the first place.
513 oop_store_raw(_prev_next, new_next);
514 NOT_PRODUCT(_removed++);
515 _refs_list.dec_length(1);
516 }
517
518 // Make the Reference object active again.
// Return the current Reference (_ref) to the "active" state by clearing
// its "next" field (an active Reference has next == NULL).
void DiscoveredListIterator::make_active() {
  // The pre barrier for G1 is probably just needed for the old
  // reference processing behavior. Should we guard this with
  // ReferenceProcessor::pending_list_uses_discovered_field() ?
  if (UseG1GC) {
    // Run the pre-barrier by hand before the raw store below, so the
    // barrier set sees the old value of the next field being overwritten.
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);
    if (UseCompressedOops) {
      // With compressed oops the field is stored as a narrowOop.
      oopDesc::bs()->write_ref_field_pre((narrowOop*)next_addr, NULL);
    } else {
      oopDesc::bs()->write_ref_field_pre((oop*)next_addr, NULL);
    }
  }
  // Raw (barrier-free) store; the pre-barrier above already ran if needed.
  java_lang_ref_Reference::set_next_raw(_ref, NULL);
}
533
// Clear the current Reference's referent field with a raw (barrier-free)
// store of NULL.
void DiscoveredListIterator::clear_referent() {
  oop_store_raw(_referent_addr, NULL);
}
537
538 // NOTE: process_phase*() are largely similar, and at a high level
539 // merely iterate over the extant list applying a predicate to
540 // each of its elements and possibly removing that element from the
541 // list and applying some further closures to that element.
542 // We should consider the possibility of replacing these
543 // process_phase*() methods by abstracting them into
544 // a single general iterator invocation that receives appropriate
545 // closures that accomplish this work.
546
547 // (SoftReferences only) Traverse the list and remove any SoftReferences whose
548 // referents are not alive, but that should be kept alive for policy reasons.
549 // Keep alive the transitive closure of all such referents.
550 void
551 ReferenceProcessor::process_phase1(DiscoveredList& refs_list,
552 ReferencePolicy* policy,
553 BoolObjectClosure* is_alive,
554 OopClosure* keep_alive,
555 VoidClosure* complete_gc) {
556 assert(policy != NULL, "Must have a non-NULL policy");
557 DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
558 // Decide which softly reachable refs should be kept alive.
559 while (iter.has_next()) {
560 iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
561 bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
562 if (referent_is_dead &&
563 !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
564 if (TraceReferenceGC) {
565 gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy",
566 p2i(iter.obj()), iter.obj()->klass()->internal_name());
567 }
568 // Remove Reference object from list
569 iter.remove();
570 // Make the Reference object active again
571 iter.make_active();
572 // keep the referent around
573 iter.make_referent_alive();
574 iter.move_to_next();
575 } else {
576 iter.next();
577 }
578 }
579 // Close the reachable set
580 complete_gc->do_void();
581 NOT_PRODUCT(
582 if (PrintGCDetails && TraceReferenceGC) {
583 gclog_or_tty->print_cr(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT
584 " discovered Refs by policy, from list " INTPTR_FORMAT,
585 iter.removed(), iter.processed(), p2i(refs_list.head()));
586 }
587 )
588 }
589
590 // Traverse the list and remove any Refs that are not active, or
591 // whose referents are either alive or NULL.
|
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/javaClasses.hpp"
27 #include "classfile/systemDictionary.hpp"
28 #include "gc_implementation/shared/gcTimer.hpp"
29 #include "gc_implementation/shared/gcTraceTime.hpp"
30 #include "gc_interface/collectedHeap.hpp"
31 #include "gc_interface/collectedHeap.inline.hpp"
32 #include "memory/referencePolicy.hpp"
33 #include "memory/referenceProcessor.hpp"
34 #include "oops/oop.inline.hpp"
35 #include "runtime/java.hpp"
36 #include "runtime/jniHandles.hpp"
37
// Soft-reference clearing policies; allocated once in init_statics().
ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL;
// Soft-ref timestamp clock (ms); mirrored into j.l.r.SoftReference's
// static clock field (see init_statics() / enable_discovery()).
jlong ReferenceProcessor::_soft_ref_timestamp_clock = 0;
41
// VM-startup hook: performs the one-time static initialization of the
// reference processing subsystem.
void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}
45
// One-time static initialization, called from referenceProcessor_init():
// seeds the soft-ref timestamp clock (both here and in j.l.r.SoftReference),
// allocates the soft-ref clearing policies, and validates RefDiscoveryPolicy.
void ReferenceProcessor::init_statics() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;

  // Initialize the soft ref timestamp clock.
  _soft_ref_timestamp_clock = now;
  // Also update the soft ref clock in j.l.r.SoftReference
  java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock);

  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  // Default policy depends on the compiler configuration:
  // LRUMaxHeapPolicy when C2 is present, LRUCurrentHeapPolicy otherwise.
  _default_soft_ref_policy = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
                                 NOT_COMPILER2(LRUCurrentHeapPolicy());
  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    vm_exit_during_initialization("Could not allocate reference policy object");
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
}
66
67 void ReferenceProcessor::enable_discovery(bool check_no_refs) {
68 #ifdef ASSERT
69 // Verify that we're not currently discovering refs
70 assert(!_discovering_refs, "nested call?");
71
72 if (check_no_refs) {
73 // Verify that the discovered lists are empty
74 verify_no_references_recorded();
75 }
76 #endif // ASSERT
77
78 // Someone could have modified the value of the static
79 // field in the j.l.r.SoftReference class that holds the
80 // soft reference timestamp clock using reflection or
81 // Unsafe between GCs. Unconditionally update the static
82 // field in ReferenceProcessor here so that we use the new
83 // value during reference discovery.
84
334 return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
335 } else {
336 return enqueue_discovered_ref_helper<oop>(this, task_executor);
337 }
338 }
339
// Splice the References on refs_list onto the VM-global pending list
// rooted at pending_list_addr, deactivating each Reference (by
// self-looping its "next" field) as it is walked. The last Reference is
// linked to whatever a Java thread may have put on the pending list
// concurrently, via an atomic exchange on pending_list_addr.
void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
                                                    HeapWord* pending_list_addr) {
  // Given a list of refs linked through the "discovered" field
  // (java.lang.ref.Reference.discovered), self-loop their "next" field
  // thus distinguishing them from active References, then
  // prepend them to the pending list.
  //
  // The Java threads will see the Reference objects linked together through
  // the discovered field. Instead of trying to do the write barrier updates
  // in all places in the reference processor where we manipulate the discovered
  // field we make sure to do the barrier here where we anyway iterate through
  // all linked Reference objects. Note that it is important to not dirty any
  // cards during reference processing since this will cause card table
  // verification to fail for G1.
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
                           INTPTR_FORMAT, p2i(refs_list.head()));
  }

  oop obj = NULL;
  oop next_d = refs_list.head();
  // Walk down the list, self-looping the next field
  // so that the References are not considered active.
  while (obj != next_d) {
    obj = next_d;
    assert(obj->is_instanceRef(), "should be reference object");
    next_d = java_lang_ref_Reference::discovered(obj);
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
                             p2i(obj), p2i(next_d));
    }
    assert(java_lang_ref_Reference::next(obj) == NULL,
           "Reference not active; should not be discovered");
    // Self-loop next, so as to make Ref not active.
    java_lang_ref_Reference::set_next_raw(obj, obj);
    if (next_d != obj) {
      // Interior node: the discovered link is already correct; just run
      // the write barrier for it (see block comment above).
      oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d);
    } else {
      // This is the last object.
      // Swap refs_list into pending_list_addr and
      // set obj's discovered to what we read from pending_list_addr.
      oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
      // Need post-barrier on pending_list_addr. See enqueue_discovered_ref_helper() above.
      java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
      oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
    }
  }
}
388
389 // Parallel enqueue task
390 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
391 public:
392 RefProcEnqueueTask(ReferenceProcessor& ref_processor,
393 DiscoveredList discovered_refs[],
394 HeapWord* pending_list_addr,
395 int n_queues)
396 : EnqueueTask(ref_processor, discovered_refs,
397 pending_list_addr, n_queues)
398 { }
399
400 virtual void work(unsigned int work_id) {
401 assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
402 // Simplest first cut: static partitioning.
403 int index = work_id;
404 // The increment on "index" must correspond to the maximum number of queues
405 // (n_queues) with which that ReferenceProcessor was created. That
406 // is because of the "clever" way the discovered references lists were
458 oop_store_raw(_discovered_addr, NULL);
459
460 // First _prev_next ref actually points into DiscoveredList (gross).
461 oop new_next;
462 if (_next == _ref) {
463 // At the end of the list, we should make _prev point to itself.
464 // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
465 // and _prev will be NULL.
466 new_next = _prev;
467 } else {
468 new_next = _next;
469 }
470 // Remove Reference object from discovered list. Note that G1 does not need a
471 // pre-barrier here because we know the Reference has already been found/marked,
472 // that's how it ended up in the discovered list in the first place.
473 oop_store_raw(_prev_next, new_next);
474 NOT_PRODUCT(_removed++);
475 _refs_list.dec_length(1);
476 }
477
// Clear the current Reference's referent field with a raw (barrier-free)
// store of NULL.
void DiscoveredListIterator::clear_referent() {
  oop_store_raw(_referent_addr, NULL);
}
481
482 // NOTE: process_phase*() are largely similar, and at a high level
483 // merely iterate over the extant list applying a predicate to
484 // each of its elements and possibly removing that element from the
485 // list and applying some further closures to that element.
486 // We should consider the possibility of replacing these
487 // process_phase*() methods by abstracting them into
488 // a single general iterator invocation that receives appropriate
489 // closures that accomplish this work.
490
491 // (SoftReferences only) Traverse the list and remove any SoftReferences whose
492 // referents are not alive, but that should be kept alive for policy reasons.
493 // Keep alive the transitive closure of all such referents.
494 void
495 ReferenceProcessor::process_phase1(DiscoveredList& refs_list,
496 ReferencePolicy* policy,
497 BoolObjectClosure* is_alive,
498 OopClosure* keep_alive,
499 VoidClosure* complete_gc) {
500 assert(policy != NULL, "Must have a non-NULL policy");
501 DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
502 // Decide which softly reachable refs should be kept alive.
503 while (iter.has_next()) {
504 iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
505 bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
506 if (referent_is_dead &&
507 !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
508 if (TraceReferenceGC) {
509 gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy",
510 p2i(iter.obj()), iter.obj()->klass()->internal_name());
511 }
512 // Remove Reference object from list
513 iter.remove();
514 // keep the referent around
515 iter.make_referent_alive();
516 iter.move_to_next();
517 } else {
518 iter.next();
519 }
520 }
521 // Close the reachable set
522 complete_gc->do_void();
523 NOT_PRODUCT(
524 if (PrintGCDetails && TraceReferenceGC) {
525 gclog_or_tty->print_cr(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT
526 " discovered Refs by policy, from list " INTPTR_FORMAT,
527 iter.removed(), iter.processed(), p2i(refs_list.head()));
528 }
529 )
530 }
531
532 // Traverse the list and remove any Refs that are not active, or
533 // whose referents are either alive or NULL.
|