// ... (elided) ...

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *G1BarrierSetC2::write_ref_field_post_entry_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL;  // Card addr
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}
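
// The domain above mirrors the C signature of the runtime entry point,
// roughly (a sketch; the exact declaration lives in G1BarrierSetRuntime):
//   void write_ref_field_post_entry(volatile CardValue* card_addr, JavaThread* thread);
// The empty range (TypeTuple::fields(0)) encodes a void return.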

const TypeFunc *G1BarrierSetC2::write_ref_stats_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL;   // HeapWord* dst
  fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL;  // oopDesc* val
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}
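
// Assumed runtime counterpart for the stats call made below (a sketch; the
// actual declaration would sit next to the other barrier entry points):
//   void G1BarrierSetRuntime::write_ref_stats(HeapWord* dst, oopDesc* val);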

#define __ ideal.
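// With this define, "__ foo(...)" in the code below expands to
// "ideal.foo(...)", so the barrier expansion reads as straight-line
// IdealKit calls.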
/*
 * Determine if the G1 pre-barrier can be removed. The pre-barrier is
 * required by SATB to make sure all objects live at the start of
 * marking are kept alive: every reference update therefore needs to
 * record the previous value of the reference before it is overwritten.
 *
 * If the previous value is NULL there is no need to save the old value.
 * References that are NULL are filtered at runtime by the barrier
 * code to avoid unnecessary queuing.
 *
 * However, in the case of newly allocated objects it might be possible
 * to prove at compile time that the reference about to be overwritten
 * is NULL, and to omit the barrier code entirely.
 *
 * The compiler needs to determine that the object in which the field is
 * about to be written is newly allocated, and that no prior store to the
 * same field has happened since the allocation.
 *
 * Returns true if the pre-barrier can be removed.
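 *
 * For example (an illustrative sketch):
 *
 *   obj = new Foo();  // newly allocated; reference fields start out NULL
 *   obj.f = x;        // previous value of obj.f is provably NULL, so the
 *                     // SATB pre-barrier for this store can be elided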
 */

// ... (lines elided) ...
  // Now some values
  // Use ctrl to avoid hoisting these values past a safepoint, which could
  // potentially reset these fields in the JavaThread.
  Node* index  = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
  Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
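  // (index_adr/buffer_adr point at the thread-local dirty card queue fields;
  // loading them under ctrl() keeps these loads below any safepoint that
  // might have flushed, and thereby reset, the queue.)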

  // Convert the store obj pointer to an int prior to doing math on it
  // Must use ctrl to prevent "integerized oop" existing across a safepoint
  Node* cast = __ CastPX(__ ctrl(), adr);

  // Divide pointer by card size
  Node* card_offset = __ URShiftX(cast, __ ConI(CardTable::card_shift));

  // Combine card table base and card offset
  Node* card_adr = __ AddP(no_base, byte_map_base_node(kit), card_offset);
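  // Net effect of the two steps above (address arithmetic sketch):
  //   card_adr = byte_map_base + ((uintptr_t)adr >> CardTable::card_shift)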

  // If we know the value being stored, check whether the store crosses regions.

  if (val != NULL) {
    if (UsePerfData && G1WriteBarrierStats) {
      const TypeFunc* stats_tf = write_ref_stats_Type();
      __ make_leaf_call(stats_tf,
                        CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_stats),
                        "write_ref_stats", adr, val);
    }
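    // The stats call above runs before any filtering, so (under the
    // G1WriteBarrierStats flag guarding it) it observes every candidate
    // store, including same-region and NULL stores that the barrier
    // below skips.
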
    // Does the store cause us to cross regions?

    // We should be able to do an unsigned compare against region_size
    // instead of an extra shift. Do we have an unsigned compare?
    // Node* region_size = __ ConI(1 << HeapRegion::LogOfHRGrainBytes);
    Node* xor_res = __ URShiftX(__ XorX(cast, __ CastPX(__ ctrl(), val)), __ ConI(HeapRegion::LogOfHRGrainBytes));
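    // Equivalent check (sketch):
    //   (((uintptr_t)adr ^ (uintptr_t)val) >> HeapRegion::LogOfHRGrainBytes) != 0
    // i.e. xor_res is non-zero exactly when adr and val lie in different regions.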

    // if (xor_res == 0) the store stays within one region, so skip the barrier
    __ if_then(xor_res, BoolTest::ne, zeroX); {

      // No barrier if we are storing a NULL
      __ if_then(val, BoolTest::ne, kit->null(), unlikely); {

        // OK, we must mark the card if it is not already dirty

        // load the original value of the card
        Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);

        __ if_then(card_val, BoolTest::ne, young_card); {
          kit->sync_kit(ideal);
          // ... (elided: remainder of the post barrier and intervening code;
          // the fragment below is the tail of the load path) ...

    // We do not require a mem bar inside pre_barrier if need_mem_bar
    // is set: the barriers would be emitted by us.
    insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
  }

  return load;
}

bool G1BarrierSetC2::is_gc_barrier_node(Node* node) const {
  if (CardTableBarrierSetC2::is_gc_barrier_node(node)) {
    return true;
  }
  if (node->Opcode() != Op_CallLeaf) {
    return false;
  }
  CallLeafNode *call = node->as_CallLeaf();
  if (call->_name == NULL) {
    return false;
  }

  return strcmp(call->_name, "write_ref_field_pre_entry") == 0 ||
         strcmp(call->_name, "write_ref_field_post_entry") == 0 ||
         strcmp(call->_name, "write_ref_stats") == 0;
}
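
// The name comparison identifies the runtime leaf calls emitted by the
// barrier expansion above; any CallLeaf carrying one of these names
// (including the write_ref_stats call added for statistics) is barrier code.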

void G1BarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
  assert(node->Opcode() == Op_CastP2X, "ConvP2XNode required");
  assert(node->outcnt() <= 2, "expects 1 or 2 users: Xor and URShift nodes");
  // There may be only one user, the URShift node, in the Object.clone()
  // intrinsic: the new allocation is passed to the arraycopy stub and
  // cannot be scalar replaced, so we don't check that case.

  // Another case of only one user (the Xor) is when the NULL check on the
  // value in the G1 post barrier is folded after CCP, so the code that
  // used the URShift is removed.

  // Record the Region node before eliminating the post barrier, since the
  // elimination also removes the CastP2X node once it has only one user.
  Node* this_region = node->in(0);
  assert(this_region != NULL, "");

  // Remove G1 post barrier.