src/share/vm/opto/graphKit.cpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File 7032314 Sdiff src/share/vm/opto

src/share/vm/opto/graphKit.cpp

Print this page




3367   {
3368     PreserveJVMState pjvms(this);
3369     set_control(iffalse);
3370     _sp += nargs;
3371     uncommon_trap(reason, Deoptimization::Action_maybe_recompile);
3372   }
3373   Node* iftrue = _gvn.transform(new (C, 1) IfTrueNode(iff));
3374   set_control(iftrue);
3375 }
3376 
3377 //------------------------------add_predicate---------------------------------
3378 void GraphKit::add_predicate(int nargs) {
3379   if (UseLoopPredicate) {
3380     add_predicate_impl(Deoptimization::Reason_predicate, nargs);
3381   }
3382 }
3383 
3384 //----------------------------- store barriers ----------------------------
3385 #define __ ideal.
3386 
3387 void GraphKit::sync_kit(IdealKit& ideal) {

3388   // Final sync IdealKit and graphKit.
3389   __ drain_delay_transform();

3390   set_all_memory(__ merged_memory());

3391   set_control(__ ctrl());
3392 }
3393 
3394 // vanilla/CMS post barrier
3395 // Insert a write-barrier store.  This is to let generational GC work; we have
3396 // to flag all oop-stores before the next GC point.
//
// Reviewer notes on parameters:
//   oop_store   - the oop store this barrier guards (consumed by storeCM on
//                 the CMS path)
//   obj         - object being stored into; used as the card address base
//                 when use_precise is false
//   adr         - precise address of the stored-to field/element
//   adr_idx     - alias index of the stored-to memory slice
//   val         - value being stored; constant NULL/TOP needs no barrier
//   use_precise - true for arrays (mark the exact card), false for
//                 instances (one card per object suffices)
3397 void GraphKit::write_barrier_post(Node* oop_store,
3398                                   Node* obj,
3399                                   Node* adr,
3400                                   uint  adr_idx,
3401                                   Node* val,
3402                                   bool use_precise) {
3403   // No store check needed if we're storing a NULL or an old object
3404   // (latter case is probably a string constant). The concurrent
3405   // mark sweep garbage collector, however, needs to have all nonNull
3406   // oop updates flagged via card-marks.
3407   if (val != NULL && val->is_Con()) {
3408     // must be either an oop or NULL
3409     const Type* t = val->bottom_type();
3410     if (t == TypePtr::NULL_PTR || t == Type::TOP)
       // NOTE(review): original lines 3411-3417 are elided from this listing;
       // only the fall-through "return" below is visible here.


3418       return;
3419   }
3420 
3421   if (use_ReduceInitialCardMarks()
3422       && obj == just_allocated_object(control())) {
3423     // We can skip marks on a freshly-allocated object in Eden.
3424     // Keep this code in sync with new_store_pre_barrier() in runtime.cpp.
3425     // That routine informs GC to take appropriate compensating steps,
3426     // upon a slow-path allocation, so as to make this card-mark
3427     // elision safe.
3428     return;
3429   }
3430 
3431   if (!use_precise) {
3432     // All card marks for a (non-array) instance are in one place:
3433     adr = obj;
3434   }
3435   // (Else it's an array (or unknown), and we want more precise card marks.)
3436   assert(adr != NULL, "");
3437 
// Build the card-mark subgraph with an IdealKit; the resulting control and
// memory state is spliced back into this GraphKit by sync_kit() below.
3438   IdealKit ideal(gvn(), control(), merged_memory(), true);
3439 
3440   // Convert the pointer to an int prior to doing math on it
3441   Node* cast = __ CastPX(__ ctrl(), adr);
3442 
3443   // Divide by card size
3444   assert(Universe::heap()->barrier_set()->kind() == BarrierSet::CardTableModRef,
3445          "Only one we handle so far.");
3446   Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
3447 
3448   // Combine card table base and card offset
3449   Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset );
3450 
3451   // Get the alias_index for raw card-mark memory
3452   int adr_type = Compile::AliasIdxRaw;
3453   // Smash zero into card
3454   Node*   zero = __ ConI(0);
3455   BasicType bt = T_BYTE;
3456   if( !UseConcMarkSweepGC ) {
// Non-CMS collectors: a plain byte store of 0 into the card suffices.
3457     __ store(__ ctrl(), card_adr, zero, bt, adr_type);
3458   } else {
3459     // Specialized path for CM store barrier
// CMS: storeCM additionally records the guarded oop store (oop_store /
// adr_idx arguments), unlike the plain store above.
3460     __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
3461   }
3462 
3463   // Final sync IdealKit and GraphKit.
3464   sync_kit(ideal);
3465 }
3466 
3467 // G1 pre/post barriers
3468 void GraphKit::g1_write_barrier_pre(Node* obj,
3469                                     Node* adr,
3470                                     uint alias_idx,
3471                                     Node* val,
3472                                     const TypeOopPtr* val_type,
3473                                     BasicType bt) {
3474   IdealKit ideal(gvn(), control(), merged_memory(), true);
3475 
3476   Node* tls = __ thread(); // ThreadLocalStorage
3477 
3478   Node* no_ctrl = NULL;
3479   Node* no_base = __ top();
3480   Node* zero = __ ConI(0);
3481 
3482   float likely  = PROB_LIKELY(0.999);
3483   float unlikely  = PROB_UNLIKELY(0.999);
3484 
3485   BasicType active_type = in_bytes(PtrQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
3486   assert(in_bytes(PtrQueue::byte_width_of_active()) == 4 || in_bytes(PtrQueue::byte_width_of_active()) == 1, "flag width");
3487 
3488   // Offsets into the thread
3489   const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() +  // 648
3490                                           PtrQueue::byte_offset_of_active());
3491   const int index_offset   = in_bytes(JavaThread::satb_mark_queue_offset() +  // 656
3492                                           PtrQueue::byte_offset_of_index());
3493   const int buffer_offset  = in_bytes(JavaThread::satb_mark_queue_offset() +  // 652
3494                                           PtrQueue::byte_offset_of_buf());


3597                                      Node* val,
3598                                      BasicType bt,
3599                                      bool use_precise) {
3600   // If we are writing a NULL then we need no post barrier
3601 
3602   if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
3603     // Must be NULL
3604     const Type* t = val->bottom_type();
3605     assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
3606     // No post barrier if writing NULLx
3607     return;
3608   }
3609 
3610   if (!use_precise) {
3611     // All card marks for a (non-array) instance are in one place:
3612     adr = obj;
3613   }
3614   // (Else it's an array (or unknown), and we want more precise card marks.)
3615   assert(adr != NULL, "");
3616 
3617   IdealKit ideal(gvn(), control(), merged_memory(), true);
3618 
3619   Node* tls = __ thread(); // ThreadLocalStorage
3620 
3621   Node* no_base = __ top();
3622   float likely  = PROB_LIKELY(0.999);
3623   float unlikely  = PROB_UNLIKELY(0.999);
3624   Node* zero = __ ConI(0);
3625   Node* zeroX = __ ConX(0);
3626 
3627   // Get the alias_index for raw card-mark memory
3628   const TypePtr* card_type = TypeRawPtr::BOTTOM;
3629 
3630   const TypeFunc *tf = OptoRuntime::g1_wb_post_Type();
3631 
3632   // Offsets into the thread
3633   const int index_offset  = in_bytes(JavaThread::dirty_card_queue_offset() +
3634                                      PtrQueue::byte_offset_of_index());
3635   const int buffer_offset = in_bytes(JavaThread::dirty_card_queue_offset() +
3636                                      PtrQueue::byte_offset_of_buf());
3637 




3367   {
3368     PreserveJVMState pjvms(this);
3369     set_control(iffalse);
3370     _sp += nargs;
3371     uncommon_trap(reason, Deoptimization::Action_maybe_recompile);
3372   }
3373   Node* iftrue = _gvn.transform(new (C, 1) IfTrueNode(iff));
3374   set_control(iftrue);
3375 }
3376 
3377 //------------------------------add_predicate---------------------------------
3378 void GraphKit::add_predicate(int nargs) {
3379   if (UseLoopPredicate) {
3380     add_predicate_impl(Deoptimization::Reason_predicate, nargs);
3381   }
3382 }
3383 
3384 //----------------------------- store barriers ----------------------------
3385 #define __ ideal.
3386 
3387 void GraphKit::sync_kit(IdealKit& ideal, bool final) {
3388   if (final) {
3389     // Final sync IdealKit and graphKit.
3390     __ drain_delay_transform();
3391   }
3392   set_all_memory(__ merged_memory());
3393   set_i_o(__ i_o());
3394   set_control(__ ctrl());
3395 }
3396 
3397 // vanilla/CMS post barrier
3398 // Insert a write-barrier store.  This is to let generational GC work; we have
3399 // to flag all oop-stores before the next GC point.
//
// Reviewer notes on parameters:
//   oop_store   - the oop store this barrier guards (consumed by storeCM on
//                 the CMS path)
//   obj         - object being stored into; used as the card address base
//                 when use_precise is false
//   adr         - precise address of the stored-to field/element
//   adr_idx     - alias index of the stored-to memory slice
//   val         - value being stored; constant NULL needs no barrier
//   use_precise - true for arrays (mark the exact card), false for
//                 instances (one card per object suffices)
3400 void GraphKit::write_barrier_post(Node* oop_store,
3401                                   Node* obj,
3402                                   Node* adr,
3403                                   uint  adr_idx,
3404                                   Node* val,
3405                                   bool use_precise) {
3406   // No store check needed if we're storing a NULL or an old object
3407   // (latter case is probably a string constant). The concurrent
3408   // mark sweep garbage collector, however, needs to have all nonNull
3409   // oop updates flagged via card-marks.
3410   if (val != NULL && val->is_Con()) {
3411     // must be either an oop or NULL
3412     const Type* t = val->bottom_type();
3413     if (t == TypePtr::NULL_PTR || t == Type::TOP)
       // NOTE(review): original lines 3414-3420 are elided from this listing;
       // only the fall-through "return" below is visible here.


3421       return;
3422   }
3423 
3424   if (use_ReduceInitialCardMarks()
3425       && obj == just_allocated_object(control())) {
3426     // We can skip marks on a freshly-allocated object in Eden.
3427     // Keep this code in sync with new_store_pre_barrier() in runtime.cpp.
3428     // That routine informs GC to take appropriate compensating steps,
3429     // upon a slow-path allocation, so as to make this card-mark
3430     // elision safe.
3431     return;
3432   }
3433 
3434   if (!use_precise) {
3435     // All card marks for a (non-array) instance are in one place:
3436     adr = obj;
3437   }
3438   // (Else it's an array (or unknown), and we want more precise card marks.)
3439   assert(adr != NULL, "");
3440 
// Build the card-mark subgraph with an IdealKit seeded from this GraphKit
// (note the new-style constructor vs. the old gvn/control/memory form);
// the result is spliced back via sync_kit() below.
3441   IdealKit ideal(this, true);
3442 
3443   // Convert the pointer to an int prior to doing math on it
3444   Node* cast = __ CastPX(__ ctrl(), adr);
3445 
3446   // Divide by card size
3447   assert(Universe::heap()->barrier_set()->kind() == BarrierSet::CardTableModRef,
3448          "Only one we handle so far.");
3449   Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
3450 
3451   // Combine card table base and card offset
3452   Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset );
3453 
3454   // Get the alias_index for raw card-mark memory
3455   int adr_type = Compile::AliasIdxRaw;
3456   // Smash zero into card
3457   Node*   zero = __ ConI(0);
3458   BasicType bt = T_BYTE;
3459   if( !UseConcMarkSweepGC ) {
// Non-CMS collectors: a plain byte store of 0 into the card suffices.
3460     __ store(__ ctrl(), card_adr, zero, bt, adr_type);
3461   } else {
3462     // Specialized path for CM store barrier
// CMS: storeCM additionally records the guarded oop store (oop_store /
// adr_idx arguments), unlike the plain store above.
3463     __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
3464   }
3465 
3466   // Final sync IdealKit and GraphKit.
3467   sync_kit(ideal);
3468 }
3469 
3470 // G1 pre/post barriers
3471 void GraphKit::g1_write_barrier_pre(Node* obj,
3472                                     Node* adr,
3473                                     uint alias_idx,
3474                                     Node* val,
3475                                     const TypeOopPtr* val_type,
3476                                     BasicType bt) {
3477   IdealKit ideal(this, true);
3478 
3479   Node* tls = __ thread(); // ThreadLocalStorage
3480 
3481   Node* no_ctrl = NULL;
3482   Node* no_base = __ top();
3483   Node* zero = __ ConI(0);
3484 
3485   float likely  = PROB_LIKELY(0.999);
3486   float unlikely  = PROB_UNLIKELY(0.999);
3487 
3488   BasicType active_type = in_bytes(PtrQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
3489   assert(in_bytes(PtrQueue::byte_width_of_active()) == 4 || in_bytes(PtrQueue::byte_width_of_active()) == 1, "flag width");
3490 
3491   // Offsets into the thread
3492   const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() +  // 648
3493                                           PtrQueue::byte_offset_of_active());
3494   const int index_offset   = in_bytes(JavaThread::satb_mark_queue_offset() +  // 656
3495                                           PtrQueue::byte_offset_of_index());
3496   const int buffer_offset  = in_bytes(JavaThread::satb_mark_queue_offset() +  // 652
3497                                           PtrQueue::byte_offset_of_buf());


3600                                      Node* val,
3601                                      BasicType bt,
3602                                      bool use_precise) {
3603   // If we are writing a NULL then we need no post barrier
3604 
3605   if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
3606     // Must be NULL
3607     const Type* t = val->bottom_type();
3608     assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
3609     // No post barrier if writing NULLx
3610     return;
3611   }
3612 
3613   if (!use_precise) {
3614     // All card marks for a (non-array) instance are in one place:
3615     adr = obj;
3616   }
3617   // (Else it's an array (or unknown), and we want more precise card marks.)
3618   assert(adr != NULL, "");
3619 
3620   IdealKit ideal(this, true);
3621 
3622   Node* tls = __ thread(); // ThreadLocalStorage
3623 
3624   Node* no_base = __ top();
3625   float likely  = PROB_LIKELY(0.999);
3626   float unlikely  = PROB_UNLIKELY(0.999);
3627   Node* zero = __ ConI(0);
3628   Node* zeroX = __ ConX(0);
3629 
3630   // Get the alias_index for raw card-mark memory
3631   const TypePtr* card_type = TypeRawPtr::BOTTOM;
3632 
3633   const TypeFunc *tf = OptoRuntime::g1_wb_post_Type();
3634 
3635   // Offsets into the thread
3636   const int index_offset  = in_bytes(JavaThread::dirty_card_queue_offset() +
3637                                      PtrQueue::byte_offset_of_index());
3638   const int buffer_offset = in_bytes(JavaThread::dirty_card_queue_offset() +
3639                                      PtrQueue::byte_offset_of_buf());
3640 


src/share/vm/opto/graphKit.cpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File