src/share/vm/opto/graphKit.cpp (Sdiff, 6877254)




Old:

1433 }
1434 
1435 void GraphKit::post_barrier(Node* ctl,
1436                             Node* store,
1437                             Node* obj,
1438                             Node* adr,
1439                             uint  adr_idx,
1440                             Node* val,
1441                             BasicType bt,
1442                             bool use_precise) {
1443   BarrierSet* bs = Universe::heap()->barrier_set();
1444   set_control(ctl);
1445   switch (bs->kind()) {
1446     case BarrierSet::G1SATBCT:
1447     case BarrierSet::G1SATBCTLogging:
1448       g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
1449       break;
1450 
1451     case BarrierSet::CardTableModRef:
1452     case BarrierSet::CardTableExtension:
1453       write_barrier_post(store, obj, adr, val, use_precise);
1454       break;
1455 
1456     case BarrierSet::ModRef:
1457       break;
1458 
1459     case BarrierSet::Other:
1460     default:
1461       ShouldNotReachHere();
1462 
1463   }
1464 }
1465 
1466 Node* GraphKit::store_oop(Node* ctl,
1467                           Node* obj,
1468                           Node* adr,
1469                           const TypePtr* adr_type,
1470                           Node* val,
1471                           const TypeOopPtr* val_type,
1472                           BasicType bt,
1473                           bool use_precise) {


3148   }
3149   return NULL;
3150 }
3151 
3152 //----------------------------- store barriers ----------------------------
3153 #define __ ideal.
3154 
3155 void GraphKit::sync_kit(IdealKit& ideal) {
3156   // Final sync IdealKit and GraphKit.
3157   __ drain_delay_transform();
3158   set_all_memory(__ merged_memory());
3159   set_control(__ ctrl());
3160 }
3161 
3162 // vanilla/CMS post barrier
3163 // Insert a write-barrier store.  This is to let generational GC work; we have
3164 // to flag all oop-stores before the next GC point.
3165 void GraphKit::write_barrier_post(Node* oop_store,
3166                                   Node* obj,
3167                                   Node* adr,

3168                                   Node* val,
3169                                   bool use_precise) {
3170   // No store check needed if we're storing a NULL or an old object
3171   // (latter case is probably a string constant). The concurrent
3172   // mark sweep garbage collector, however, needs to have all non-NULL
3173   // oop updates flagged via card-marks.
3174   if (val != NULL && val->is_Con()) {
3175     // must be either an oop or NULL
3176     const Type* t = val->bottom_type();
3177     if (t == TypePtr::NULL_PTR || t == Type::TOP)
3178       // stores of null never (?) need barriers
3179       return;
3180     ciObject* con = t->is_oopptr()->const_oop();
3181     if (con != NULL
3182         && con->is_perm()
3183         && Universe::heap()->can_elide_permanent_oop_store_barriers())
3184       // no store barrier needed, because no old-to-new ref created
3185       return;
3186   }
3187 


3197   // Convert the pointer to an int prior to doing math on it
3198   Node* cast = __ CastPX(__ ctrl(), adr);
3199 
3200   // Divide by card size
3201   assert(Universe::heap()->barrier_set()->kind() == BarrierSet::CardTableModRef,
3202          "Only one we handle so far.");
3203   Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
3204 
3205   // Combine card table base and card offset
3206   Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset );
3207 
3208   // Get the alias_index for raw card-mark memory
3209   int adr_type = Compile::AliasIdxRaw;
3210   // Smash zero into card
3211   Node*   zero = __ ConI(0);
3212   BasicType bt = T_BYTE;
3213   if (!UseConcMarkSweepGC) {
3214     __ store(__ ctrl(), card_adr, zero, bt, adr_type);
3215   } else {
3216     // Specialized path for CM store barrier
3217     __ storeCM(__ ctrl(), card_adr, zero, oop_store, bt, adr_type);
3218   }
3219 
3220   // Final sync IdealKit and GraphKit.
3221   sync_kit(ideal);
3222 }
3223 
3224 // G1 pre/post barriers
3225 void GraphKit::g1_write_barrier_pre(Node* obj,
3226                                     Node* adr,
3227                                     uint alias_idx,
3228                                     Node* val,
3229                                     const TypeOopPtr* val_type,
3230                                     BasicType bt) {
3231   IdealKit ideal(gvn(), control(), merged_memory(), true);
3232 
3233   Node* tls = __ thread(); // ThreadLocalStorage
3234 
3235   Node* no_ctrl = NULL;
3236   Node* no_base = __ top();
3237   Node* zero = __ ConI(0);


3297 
3298       } __ else_(); {
3299 
3300         // logging buffer is full, call the runtime
3301         const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
3302         __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", orig, tls);
3303       } __ end_if();  // (!index)
3304     } __ end_if();  // (orig != NULL)
3305   } __ end_if();  // (!marking)
3306 
3307   // Final sync IdealKit and GraphKit.
3308   sync_kit(ideal);
3309 }
3310 
3311 //
3312 // Update the card table and add card address to the queue
3313 //
3314 void GraphKit::g1_mark_card(IdealKit& ideal,
3315                             Node* card_adr,
3316                             Node* oop_store,

3317                             Node* index,
3318                             Node* index_adr,
3319                             Node* buffer,
3320                             const TypeFunc* tf) {
3321 
3322   Node* zero = __ ConI(0);
3323   Node* no_base = __ top();
3324   BasicType card_bt = T_BYTE;
3325   // Smash zero into card. MUST BE ORDERED WRT THE OOP STORE
3326   __ storeCM(__ ctrl(), card_adr, zero, oop_store, card_bt, Compile::AliasIdxRaw);
3327 
3328   //  Now do the queue work
3329   __ if_then(index, BoolTest::ne, zero); {
3330 
3331     Node* next_index = __ SubI(index, __ ConI(sizeof(intptr_t)));
3332     Node* next_indexX = next_index;
3333 #ifdef _LP64
3334     // We could refine the type for what it's worth
3335     // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue);
3336     next_indexX = _gvn.transform( new (C, 2) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
3337 #endif // _LP64
3338     Node* log_addr = __ AddP(no_base, buffer, next_indexX);
3339 
3340     __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw);
3341     __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
3342 
3343   } __ else_(); {
3344     __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
3345   } __ end_if();
3346 


3418   if (val != NULL) {
3419     // Does the store cause us to cross regions?
3420 
3421     // Should be able to do an unsigned compare of region_size instead of
3422   // an extra shift. Do we have an unsigned compare?
3423     // Node* region_size = __ ConI(1 << HeapRegion::LogOfHRGrainBytes);
3424   Node* xor_res = __ URShiftX(__ XorX(cast, __ CastPX(__ ctrl(), val)), __ ConI(HeapRegion::LogOfHRGrainBytes));
3425 
3426     // if (xor_res == 0) same region so skip
3427     __ if_then(xor_res, BoolTest::ne, zeroX); {
3428 
3429       // No barrier if we are storing a NULL
3430       __ if_then(val, BoolTest::ne, null(), unlikely); {
3431 
3432         // Ok must mark the card if not already dirty
3433 
3434         // load the original value of the card
3435         Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
3436 
3437         __ if_then(card_val, BoolTest::ne, zero); {
3438           g1_mark_card(ideal, card_adr, oop_store, index, index_adr, buffer, tf);
3439         } __ end_if();
3440       } __ end_if();
3441     } __ end_if();
3442   } else {
3443     // Object.clone() intrinsic uses this path.
3444     g1_mark_card(ideal, card_adr, oop_store, index, index_adr, buffer, tf);
3445   }
3446 
3447   // Final sync IdealKit and GraphKit.
3448   sync_kit(ideal);
3449 }
3450 #undef __
3451 


New:

1433 }
1434 
1435 void GraphKit::post_barrier(Node* ctl,
1436                             Node* store,
1437                             Node* obj,
1438                             Node* adr,
1439                             uint  adr_idx,
1440                             Node* val,
1441                             BasicType bt,
1442                             bool use_precise) {
1443   BarrierSet* bs = Universe::heap()->barrier_set();
1444   set_control(ctl);
1445   switch (bs->kind()) {
1446     case BarrierSet::G1SATBCT:
1447     case BarrierSet::G1SATBCTLogging:
1448       g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
1449       break;
1450 
1451     case BarrierSet::CardTableModRef:
1452     case BarrierSet::CardTableExtension:
1453       write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
1454       break;
1455 
1456     case BarrierSet::ModRef:
1457       break;
1458 
1459     case BarrierSet::Other:
1460     default:
1461       ShouldNotReachHere();
1462 
1463   }
1464 }
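
For orientation, a hedged sketch of which collector configurations reach each case; this mapping is an assumption drawn from HotSpot of this era, not something the diff itself shows:

// Illustrative mapping (assumption, not part of this change):
//   -XX:+UseG1GC                       -> G1SATBCT / G1SATBCTLogging -> g1_write_barrier_post
//   -XX:+UseParallelGC                 -> CardTableExtension         -> write_barrier_post
//   serial and -XX:+UseConcMarkSweepGC -> CardTableModRef            -> write_barrier_post
//   plain ModRef                       -> no post barrier emitted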
1465 
1466 Node* GraphKit::store_oop(Node* ctl,
1467                           Node* obj,
1468                           Node* adr,
1469                           const TypePtr* adr_type,
1470                           Node* val,
1471                           const TypeOopPtr* val_type,
1472                           BasicType bt,
1473                           bool use_precise) {


3148   }
3149   return NULL;
3150 }
3151 
3152 //----------------------------- store barriers ----------------------------
3153 #define __ ideal.
3154 
3155 void GraphKit::sync_kit(IdealKit& ideal) {
3156   // Final sync IdealKit and GraphKit.
3157   __ drain_delay_transform();
3158   set_all_memory(__ merged_memory());
3159   set_control(__ ctrl());
3160 }
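
A minimal sketch of the calling pattern sync_kit is meant to close, mirroring what g1_write_barrier_pre below actually does:

// Sketch of the usage pattern (see g1_write_barrier_pre below); illustrative only.
IdealKit ideal(gvn(), control(), merged_memory(), true);
// ... emit barrier IR through the '__' shorthand ...
sync_kit(ideal);  // push IdealKit's control and merged memory back into this GraphKit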
3161 
3162 // vanilla/CMS post barrier
3163 // Insert a write-barrier store.  This is to let generational GC work; we have
3164 // to flag all oop-stores before the next GC point.
3165 void GraphKit::write_barrier_post(Node* oop_store,
3166                                   Node* obj,
3167                                   Node* adr,
3168                                   uint  adr_idx,
3169                                   Node* val,
3170                                   bool use_precise) {
3171   // No store check needed if we're storing a NULL or an old object
3172   // (latter case is probably a string constant). The concurrent
3173   // mark sweep garbage collector, however, needs to have all non-NULL
3174   // oop updates flagged via card-marks.
3175   if (val != NULL && val->is_Con()) {
3176     // must be either an oop or NULL
3177     const Type* t = val->bottom_type();
3178     if (t == TypePtr::NULL_PTR || t == Type::TOP)
3179       // stores of null never (?) need barriers
3180       return;
3181     ciObject* con = t->is_oopptr()->const_oop();
3182     if (con != NULL
3183         && con->is_perm()
3184         && Universe::heap()->can_elide_permanent_oop_store_barriers())
3185       // no store barrier needed, because no old-to-new ref created
3186       return;
3187   }
3188 


3198   // Convert the pointer to an int prior to doing math on it
3199   Node* cast = __ CastPX(__ ctrl(), adr);
3200 
3201   // Divide by card size
3202   assert(Universe::heap()->barrier_set()->kind() == BarrierSet::CardTableModRef,
3203          "Only one we handle so far.");
3204   Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
3205 
3206   // Combine card table base and card offset
3207   Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset );
3208 
3209   // Get the alias_index for raw card-mark memory
3210   int adr_type = Compile::AliasIdxRaw;
3211   // Smash zero into card
3212   Node*   zero = __ ConI(0);
3213   BasicType bt = T_BYTE;
3214   if (!UseConcMarkSweepGC) {
3215     __ store(__ ctrl(), card_adr, zero, bt, adr_type);
3216   } else {
3217     // Specialized path for CM store barrier
3218     __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
3219   }
3220 
3221   // Final sync IdealKit and GraphKit.
3222   sync_kit(ideal);
3223 }
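
As a worked example of the address arithmetic above: with the conventional 512-byte cards, CardTableModRefBS::card_shift is 9, so the emitted IR is equivalent to this hedged C++ sketch (the card size and function shape are assumptions for illustration, not taken from this diff):

#include <cstdint>

// Hedged sketch of the IR built above, not HotSpot source.
static void card_mark_sketch(signed char* byte_map_base, void* field_adr) {
  uintptr_t p = reinterpret_cast<uintptr_t>(field_adr);  // CastPX: pointer to integer
  signed char* card_adr = byte_map_base + (p >> 9);      // URShiftX + AddP: card address
  *card_adr = 0;                                         // the zero store dirties the card
}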
3224 
3225 // G1 pre/post barriers
3226 void GraphKit::g1_write_barrier_pre(Node* obj,
3227                                     Node* adr,
3228                                     uint alias_idx,
3229                                     Node* val,
3230                                     const TypeOopPtr* val_type,
3231                                     BasicType bt) {
3232   IdealKit ideal(gvn(), control(), merged_memory(), true);
3233 
3234   Node* tls = __ thread(); // ThreadLocalStorage
3235 
3236   Node* no_ctrl = NULL;
3237   Node* no_base = __ top();
3238   Node* zero = __ ConI(0);


3298 
3299       } __ else_(); {
3300 
3301         // logging buffer is full, call the runtime
3302         const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
3303         __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", orig, tls);
3304       } __ end_if();  // (!index)
3305     } __ end_if();  // (orig != NULL)
3306   } __ end_if();  // (!marking)
3307 
3308   // Final sync IdealKit and GraphKit.
3309   sync_kit(ideal);
3310 }
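
The head of this function is elided here, but the end_if comments above name its guards; a hedged reconstruction of the overall control flow follows, with illustrative stand-in types and names (oop, marking_active, index, and buffer are placeholders; only g1_wb_pre comes from the listing):

#include <cstddef>
#include <cstdint>

typedef void* oop;                               // illustrative stand-in for the VM's oop
extern void g1_wb_pre(oop orig, void* thread);   // runtime entry used in the call above

// Hedged reconstruction from the end_if comments; not VM source.
static void satb_pre_barrier_sketch(bool marking_active, oop* adr,
                                    intptr_t& index, char* buffer, void* thread) {
  if (marking_active) {                          // (!marking) guard
    oop orig = *adr;                             // previous value of the field
    if (orig != NULL) {                          // (orig != NULL) guard
      if (index != 0) {                          // (!index): room left in SATB buffer
        index -= sizeof(intptr_t);
        *(oop*)(buffer + index) = orig;          // log the old value
      } else {
        g1_wb_pre(orig, thread);                 // buffer full: runtime drains it
      }
    }
  }
}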
3311 
3312 //
3313 // Update the card table and add card address to the queue
3314 //
3315 void GraphKit::g1_mark_card(IdealKit& ideal,
3316                             Node* card_adr,
3317                             Node* oop_store,
3318                             uint oop_alias_idx,
3319                             Node* index,
3320                             Node* index_adr,
3321                             Node* buffer,
3322                             const TypeFunc* tf) {
3323 
3324   Node* zero = __ ConI(0);
3325   Node* no_base = __ top();
3326   BasicType card_bt = T_BYTE;
3327   // Smash zero into card. MUST BE ORDERED WRT THE OOP STORE
3328   __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);
3329 
3330   //  Now do the queue work
3331   __ if_then(index, BoolTest::ne, zero); {
3332 
3333     Node* next_index = __ SubI(index, __ ConI(sizeof(intptr_t)));
3334     Node* next_indexX = next_index;
3335 #ifdef _LP64
3336     // We could refine the type for what it's worth
3337     // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue);
3338     next_indexX = _gvn.transform( new (C, 2) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
3339 #endif // _LP64
3340     Node* log_addr = __ AddP(no_base, buffer, next_indexX);
3341 
3342     __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw);
3343     __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
3344 
3345   } __ else_(); {
3346     __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
3347   } __ end_if();
3348 


3420   if (val != NULL) {
3421     // Does the store cause us to cross regions?
3422 
3423     // Should be able to do an unsigned compare of region_size instead of
3424   // an extra shift. Do we have an unsigned compare?
3425     // Node* region_size = __ ConI(1 << HeapRegion::LogOfHRGrainBytes);
3426   Node* xor_res = __ URShiftX(__ XorX(cast, __ CastPX(__ ctrl(), val)), __ ConI(HeapRegion::LogOfHRGrainBytes));
3427 
3428     // if (xor_res == 0) same region so skip
3429     __ if_then(xor_res, BoolTest::ne, zeroX); {
3430 
3431       // No barrier if we are storing a NULL
3432       __ if_then(val, BoolTest::ne, null(), unlikely); {
3433 
3434         // Ok must mark the card if not already dirty
3435 
3436         // load the original value of the card
3437         Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
3438 
3439         __ if_then(card_val, BoolTest::ne, zero); {
3440           g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
3441         } __ end_if();
3442       } __ end_if();
3443     } __ end_if();
3444   } else {
3445     // Object.clone() intrinsic uses this path.
3446     g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
3447   }
3448 
3449   // Final sync IdealKit and GraphKit.
3450   sync_kit(ideal);
3451 }
3452 #undef __
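
Putting the pieces of g1_write_barrier_post together, the dynamic filtering above amounts to this hedged sketch (illustrative names; kLogOfHRGrainBytes stands in for HeapRegion::LogOfHRGrainBytes, whose value is an assumption here; the static val == NULL clone path marks unconditionally and is omitted):

#include <cstddef>
#include <cstdint>

static const int kLogOfHRGrainBytes = 20;  // assumed 1M regions; the VM reads HeapRegion::LogOfHRGrainBytes

// Hedged sketch of the G1 post-barrier filter; not VM source.
static bool needs_card_mark_sketch(void* adr, void* val, signed char* card_adr) {
  uintptr_t a = reinterpret_cast<uintptr_t>(adr);
  uintptr_t v = reinterpret_cast<uintptr_t>(val);
  return ((a ^ v) >> kLogOfHRGrainBytes) != 0   // store crosses heap regions?
      && val != NULL                            // storing NULL needs no barrier
      && *card_adr != 0;                        // card not already dirty (dirty == 0 here)
}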
3453 