--- old/src/share/vm/opto/graphKit.cpp
+++ new/src/share/vm/opto/graphKit.cpp
(3562 lines elided)
3563 3563      assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
3564 3564    }
3565 3565    assert(bt == T_OBJECT, "or we shouldn't be here");
3566 3566  
3567 3567    IdealKit ideal(this, true);
3568 3568  
3569 3569    Node* tls = __ thread(); // ThreadLocalStorage
3570 3570  
3571 3571    Node* no_ctrl = NULL;
3572 3572    Node* no_base = __ top();
3573      -  Node* zero = __ ConI(0);
     3573 +  Node* zero  = __ ConI(0);
     3574 +  Node* zeroX = __ ConX(0);
3574 3575  
3575 3576    float likely  = PROB_LIKELY(0.999);
3576 3577    float unlikely  = PROB_UNLIKELY(0.999);
3577 3578  
3578 3579    BasicType active_type = in_bytes(PtrQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
3579 3580    assert(in_bytes(PtrQueue::byte_width_of_active()) == 4 || in_bytes(PtrQueue::byte_width_of_active()) == 1, "flag width");
3580 3581  
3581 3582    // Offsets into the thread
3582 3583    const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() +  // 648
3583 3584                                            PtrQueue::byte_offset_of_active());
(5 lines elided)
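The new zeroX constant (__ ConX(0)) is the pointer-width zero that the word-sized SATB queue index is compared against further down; the X-forms (ConX, TypeX_X, SubXNode) expand to the long variants on LP64 builds and to the int variants on 32-bit builds. A minimal stand-alone sketch of the width invariant the change relies on, mirroring the assert added below (illustrative only, no HotSpot headers):

// Stand-alone sketch (illustrative only): the basic type used to load
// PtrQueue::_index must be exactly as wide as size_t, which is what the
// new assert in pre_barrier() checks via type2aelembytes().
#include <cstddef>
#include <cassert>

int main() {
  // On LP64 builds TypeX_X->basic_type() is T_LONG (8 bytes); on 32-bit VMs
  // it is T_INT (4 bytes) -- matching sizeof(size_t) on either platform.
  const size_t index_load_bytes = (sizeof(void*) == 8) ? 8 : 4;
  assert(sizeof(size_t) == index_load_bytes &&
         "Loading G1 PtrQueue::_index with wrong size.");
  return 0;
}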
3589 3590    // Now the actual pointers into the thread
3590 3591    Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
3591 3592    Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
3592 3593    Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));
3593 3594  
3594 3595    // Now some of the values
3595 3596    Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
3596 3597  
3597 3598    // if (!marking)
3598 3599    __ if_then(marking, BoolTest::ne, zero); {
3599      -    Node* index   = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
     3600 +    BasicType index_bt = TypeX_X->basic_type();
     3601 +    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 PtrQueue::_index with wrong size.");
     3602 +    Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
3600 3603  
3601 3604      if (do_load) {
3602 3605        // load original value
3603 3606        // alias_idx correct??
3604 3607        pre_val = __ load(no_ctrl, adr, val_type, bt, alias_idx);
3605 3608      }
3606 3609  
3607 3610      // if (pre_val != NULL)
3608 3611      __ if_then(pre_val, BoolTest::ne, null()); {
3609 3612        Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
3610 3613  
3611 3614        // is the queue for this thread full?
3612      -      __ if_then(index, BoolTest::ne, zero, likely); {
     3615 +      __ if_then(index, BoolTest::ne, zeroX, likely); {
3613 3616  
3614 3617          // decrement the index
3615      -        Node* next_index = __ SubI(index,  __ ConI(sizeof(intptr_t)));
3616      -        Node* next_indexX = next_index;
3617      -#ifdef _LP64
3618      -        // We could refine the type for what it's worth
3619      -        // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue);
3620      -        next_indexX = _gvn.transform( new (C) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
3621      -#endif
     3618 +        Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
3622 3619  
3623 3620          // Now get the buffer location we will log the previous value into and store it
3624      -        Node *log_addr = __ AddP(no_base, buffer, next_indexX);
     3621 +        Node *log_addr = __ AddP(no_base, buffer, next_index);
3625 3622          __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw);
3626 3623          // update the index
3627      -        __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
     3624 +        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw);
3628 3625  
3629 3626        } __ else_(); {
3630 3627  
3631 3628          // logging buffer is full, call the runtime
3632 3629          const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
3633 3630          __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls);
3634 3631        } __ end_if();  // (!index)
3635 3632      } __ end_if();  // (pre_val != NULL)
3636 3633    } __ end_if();  // (!marking)
3637 3634  
(6 lines elided)
3644 3641  //
3645 3642  void GraphKit::g1_mark_card(IdealKit& ideal,
3646 3643                              Node* card_adr,
3647 3644                              Node* oop_store,
3648 3645                              uint oop_alias_idx,
3649 3646                              Node* index,
3650 3647                              Node* index_adr,
3651 3648                              Node* buffer,
3652 3649                              const TypeFunc* tf) {
3653 3650  
3654      -  Node* zero = __ ConI(0);
     3651 +  Node* zero  = __ ConI(0);
     3652 +  Node* zeroX = __ ConX(0);
3655 3653    Node* no_base = __ top();
3656 3654    BasicType card_bt = T_BYTE;
3657 3655    // Smash zero into card. MUST BE ORDERED WRT TO STORE
3658 3656    __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);
3659 3657  
3660 3658    //  Now do the queue work
3661      -  __ if_then(index, BoolTest::ne, zero); {
     3659 +  __ if_then(index, BoolTest::ne, zeroX); {
3662 3660  
3663      -    Node* next_index = __ SubI(index, __ ConI(sizeof(intptr_t)));
3664      -    Node* next_indexX = next_index;
3665      -#ifdef _LP64
3666      -    // We could refine the type for what it's worth
3667      -    // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue);
3668      -    next_indexX = _gvn.transform( new (C) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
3669      -#endif // _LP64
3670      -    Node* log_addr = __ AddP(no_base, buffer, next_indexX);
     3661 +    Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
     3662 +    Node* log_addr = __ AddP(no_base, buffer, next_index);
3671 3663  
3672 3664      __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw);
3673      -    __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
     3665 +    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw);
3674 3666  
3675 3667    } __ else_(); {
3676 3668      __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
3677 3669    } __ end_if();
3678 3670  
3679 3671  }
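g1_mark_card() gets the same treatment: the dirty-card queue index is handled at pointer width end to end, and the old _LP64 ConvI2L special case disappears. A hedged runtime-level sketch (struct and helper names hypothetical):

#include <cstddef>
#include <cstdint>

// Hedged sketch of g1_mark_card(): zero ("dirty") the card, then log its
// address in the dirty-card queue, calling into the runtime when the queue
// is full.
struct DirtyCardQueueSketch {
  size_t index;   // word-sized, like PtrQueue::_index
  char*  buf;
};

static void runtime_g1_wb_post(void* /*card_adr*/, void* /*thread*/) {
  // stand-in for the SharedRuntime::g1_wb_post leaf call
}

static void mark_card_sketch(signed char* card_adr,
                             DirtyCardQueueSketch* q, void* thread) {
  *card_adr = 0;                             // storeCM: smash zero into the card
  if (q->index != 0) {                       // compare against zeroX
    q->index -= sizeof(intptr_t);            // SubXNode at pointer width
    *(void**)(q->buf + q->index) = card_adr; // log the card address (T_ADDRESS)
  } else {
    runtime_g1_wb_post(card_adr, thread);    // queue full: leaf call
  }
}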
3680 3672  
3681 3673  void GraphKit::g1_write_barrier_post(Node* oop_store,
3682 3674                                       Node* obj,
3683 3675                                       Node* adr,
(40 lines elided)
3724 3716                                       PtrQueue::byte_offset_of_buf());
3725 3717  
3726 3718    // Pointers into the thread
3727 3719  
3728 3720    Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
3729 3721    Node* index_adr =  __ AddP(no_base, tls, __ ConX(index_offset));
3730 3722  
3731 3723    // Now some values
3732 3724    // Use ctrl to avoid hoisting these values past a safepoint, which could
3733 3725    // potentially reset these fields in the JavaThread.
3734      -  Node* index  = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
     3726 +  Node* index  = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
3735 3727    Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
3736 3728  
3737 3729    // Convert the store obj pointer to an int prior to doing math on it
3738 3730    // Must use ctrl to prevent "integerized oop" existing across safepoint
3739 3731    Node* cast =  __ CastPX(__ ctrl(), adr);
3740 3732  
3741 3733    // Divide pointer by card size
3742 3734    Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
3743 3735  
3744 3736    // Combine card table base and card offset
(115 lines elided)
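In g1_write_barrier_post() the queue index is likewise loaded at TypeX_X width, and the store address is turned into a card address before the (elided) enqueue: CastPX reinterprets the pointer as a word-sized integer, URShiftX shifts it right by CardTableModRefBS::card_shift (9 for the usual 512-byte cards), and the result is added to the card table base in the lines elided above. A hedged sketch of that arithmetic with illustrative parameters:

#include <cstdint>

// Hedged sketch of the card-address arithmetic built above; byte_map_base
// and the shift value are illustrative parameters, and the AddP that
// combines base and offset sits in the elided lines.
static signed char* card_address_sketch(void* adr, signed char* byte_map_base,
                                        int card_shift /* 9 */) {
  uintptr_t cast   = (uintptr_t) adr;        // CastPX: pointer as word-sized int
  uintptr_t offset = cast >> card_shift;     // URShiftX: divide by the card size
  return byte_map_base + offset;             // combine with the card table base
}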