  default:
    fatal("unexpected type: %s", type2name(type));
  }
  assert(chk != NULL, "sanity check");
  chk = _gvn.transform(chk);

  BoolTest::mask btest = assert_null ? BoolTest::eq : BoolTest::ne;
  BoolNode *btst = new BoolNode( chk, btest);
  Node   *tst = _gvn.transform( btst );

  //-----------
  // if peephole optimizations occurred, a prior test existed.
  // If a prior test existed, it may dominate this point, in which case
  // we can avoid emitting this test altogether.
  if (tst != btst && type == T_OBJECT) {
    // At this point we want to scan up the CFG to see if we can
    // find an identical test (and so avoid this test altogether).
    Node *cfg = control();
    int depth = 0;
    while( depth < 16 ) {       // Limit search depth for speed
      if( cfg->Opcode() == Op_IfTrue &&
          cfg->in(0)->in(1) == tst ) {
        // Found prior test.  Use "cast_not_null" to construct a CastPP
        // identical to (and hence hashing to) the one that already exists
        // for the prior test.  Return that casted value.
        if (assert_null) {
          replace_in_map(value, null());
          return null();        // do not issue the redundant test
        }
        Node *oldcontrol = control();
        set_control(cfg);
        Node *res = cast_not_null(value);
        set_control(oldcontrol);
        NOT_PRODUCT(explicit_null_checks_elided++);
        return res;
      }
      cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
      if (cfg == NULL)  break;  // Quit at region nodes
      depth++;
    }
  }
// ...

                                                      trap_request), bci());
  }

  CompileLog* log = C->log();
  if (log != NULL) {
    int kid = (klass == NULL)? -1: log->identify(klass);
    log->begin_elem("uncommon_trap bci='%d'", bci());
    char buf[100];
    log->print(" %s", Deoptimization::format_trap_request(buf, sizeof(buf),
                                                          trap_request));
    if (kid >= 0)         log->print(" klass='%d'", kid);
    if (comment != NULL)  log->print(" comment='%s'", comment);
    log->end_elem();
  }

  // Make sure any guarding test views this path as very unlikely
  Node *i0 = control()->in(0);
  if (i0 != NULL && i0->is_If()) {      // Found a guarding if test?
    IfNode *iff = i0->as_If();
    float f = iff->_prob;               // Get prob
    if (control()->Opcode() == Op_IfTrue) {
      if (f > PROB_UNLIKELY_MAG(4))
        iff->_prob = PROB_MIN;
    } else {
      if (f < PROB_LIKELY_MAG(4))
        iff->_prob = PROB_MAX;
    }
  }
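  // A worked example (assuming the usual opto probability macros, where
  // PROB_UNLIKELY_MAG(4) is 1e-4 and PROB_LIKELY_MAG(4) is 1 - 1e-4): if the
  // trap hangs off the IfTrue projection of a branch previously estimated at
  // f == 0.5, then f > 1e-4 and the branch is re-marked with PROB_MIN;
  // symmetrically, a trap on the IfFalse projection drives f up to PROB_MAX.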

  // Clear out dead values from the debug info.
  kill_dead_locals();

  // Now insert the uncommon trap subroutine call
  address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point();
  const TypePtr* no_memory_effects = NULL;
  // Pass the index of the class to be loaded
  Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON |
                                 (must_throw ? RC_MUST_THROW : 0),
                                 OptoRuntime::uncommon_trap_Type(),
                                 call_addr, "uncommon_trap", no_memory_effects,
                                 intcon(trap_request));
// ...

  // Return final merged results
  set_control( _gvn.transform(region) );
  record_for_igvn(region);
  return res;
}

//------------------------------next_monitor-----------------------------------
// What number should be given to the next monitor?
int GraphKit::next_monitor() {
  int current = jvms()->monitor_depth() * C->sync_stack_slots();
  int next = current + C->sync_stack_slots();
  // Keep the toplevel high water mark current:
  if (C->fixed_slots() < next)  C->set_fixed_slots(next);
  return current;
}
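// A quick worked example of the slot arithmetic (assuming a platform where
// C->sync_stack_slots() == 1): with two monitors already held,
// monitor_depth() == 2, so current == 2 and next == 3; fixed_slots() is
// raised to at least 3, and the new monitor is assigned number 2.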

//------------------------------insert_mem_bar---------------------------------
// Memory barrier to avoid floating things around
// The membar serves as a pinch point between both control and all memory slices.
Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) {
  MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
  mb->init_req(TypeFunc::Control, control());
  mb->init_req(TypeFunc::Memory,  reset_memory());
  Node* membar = _gvn.transform(mb);
  set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
  set_all_memory_call(membar);
  return membar;
}
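// A minimal usage sketch (hypothetical caller; the opcode is whatever fence
// the caller needs, e.g. Op_MemBarVolatile for a full fence):
//
//   insert_mem_bar(Op_MemBarVolatile);
//
// Afterwards control() and every memory slice flow through the membar's
// projections, so nothing emitted later can be scheduled above it.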

//-------------------------insert_mem_bar_volatile----------------------------
// Memory barrier to avoid floating things around
// The membar serves as a pinch point between both control and memory(alias_idx).
// If you want to make a pinch point on all memory slices, do not use this
// function (even with AliasIdxBot); use insert_mem_bar() instead.
Node* GraphKit::insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent) {
  // When Parse::do_put_xxx updates a volatile field, it appends a series
  // of MemBarVolatile nodes, one for *each* volatile field alias category.
  // The first membar is on the same memory slice as the field store opcode.
  // This forces the membar to follow the store.  (Bug 6500685 broke this.)
  // All the other membars (for other volatile slices, including AliasIdxBot,
  // which stands for all unknown volatile slices) are control-dependent
  // on the first membar.  This prevents later volatile loads or stores
  // from sliding up past the just-emitted store.

  MemBarNode* mb = MemBarNode::make(C, opcode, alias_idx, precedent);
  mb->set_req(TypeFunc::Control, control());
  if (alias_idx == Compile::AliasIdxBot) {
    mb->set_req(TypeFunc::Memory, merged_memory()->base_memory());
  } else {
    assert(!(opcode == Op_Initialize && alias_idx != Compile::AliasIdxRaw), "fix caller");
    mb->set_req(TypeFunc::Memory, memory(alias_idx));
  }
  Node* membar = _gvn.transform(mb);
  set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
  if (alias_idx == Compile::AliasIdxBot) {
    merged_memory()->set_base_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)));
  } else {
    set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)), alias_idx);
  }
  return membar;
}
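// A sketch of the volatile-store scheme the header comment describes (the
// actual emission logic lives in Parse::do_put_xxx and may differ; the
// variable names and ordering here are illustrative only):
//
//   // fence the stored field's own slice first, so the membar follows the store
//   insert_mem_bar_volatile(Op_MemBarVolatile, field_alias_idx);
//   // then fence the catch-all slice; it is control-dependent on the first
//   insert_mem_bar_volatile(Op_MemBarVolatile, Compile::AliasIdxBot);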

// Insert a StoreLoad barrier: a wide MemBarVolatile over all memory slices,
// after which only the raw slice is rerouted through the barrier's memory
// projection; the remaining slices keep their pre-barrier state.
void GraphKit::insert_store_load_for_barrier() {
  Node* mem = reset_memory();
  MemBarNode* mb = MemBarNode::make(C, Op_MemBarVolatile, Compile::AliasIdxBot);
  mb->init_req(TypeFunc::Control, control());
  mb->init_req(TypeFunc::Memory, mem);
  Node* membar = _gvn.transform(mb);
  set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
  Node* newmem = _gvn.transform(new ProjNode(membar, TypeFunc::Memory));
  set_all_memory(mem);
  set_memory(newmem, Compile::AliasIdxRaw);
}
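// Hedged usage sketch (hypothetical caller, e.g. a card-marking write
// barrier): after a raw-slice store that must not be reordered with a
// following raw-slice load, one could write:
//
//   store_to_memory(control(), card_adr, zero, T_BYTE,
//                   Compile::AliasIdxRaw, MemNode::unordered);
//   insert_store_load_for_barrier();   // StoreLoad on the raw slice only
//   Node* v = make_load(control(), card_adr, TypeInt::BYTE, T_BYTE,
//                       Compile::AliasIdxRaw, MemNode::unordered);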


//------------------------------shared_lock------------------------------------
// Emit locking code.
FastLockNode* GraphKit::shared_lock(Node* obj) {
  // bci is either a monitorenter bc or InvocationEntryBci
  // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
  assert(SynchronizationEntryBCI == InvocationEntryBci, "");

  if( !GenerateSynchronizationCode )
    return NULL;                // Not locking things?
  if (stopped())                // Dead monitor?
  // ...

  const TypeFunc *tf = LockNode::lock_type();
  LockNode *lock = new LockNode(C, tf);

  lock->init_req( TypeFunc::Control, control() );
  lock->init_req( TypeFunc::Memory , mem );
  lock->init_req( TypeFunc::I_O    , top() );     // does no i/o
  lock->init_req( TypeFunc::FramePtr, frameptr() );
  lock->init_req( TypeFunc::ReturnAdr, top() );

  lock->init_req(TypeFunc::Parms + 0, obj);
  lock->init_req(TypeFunc::Parms + 1, box);
  lock->init_req(TypeFunc::Parms + 2, flock);
  add_safepoint_edges(lock);

  lock = _gvn.transform( lock )->as_Lock();

  // lock has no side-effects, sets few values
  set_predefined_output_for_runtime_call(lock, mem, TypeRawPtr::BOTTOM);

  insert_mem_bar(Op_MemBarAcquireLock);

  // Add this to the worklist so that the lock can be eliminated
  record_for_igvn(lock);

#ifndef PRODUCT
  if (PrintLockStatistics) {
    // Update the counter for this lock.  Don't bother using an atomic
    // operation since we don't require absolute accuracy.
    lock->create_lock_counter(map()->jvms());
    increment_counter(lock->counter()->addr());
  }
#endif

  return flock;
}
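// How the pair is intended to be used (a sketch; the real call sites are the
// monitorenter/monitorexit bytecode handlers in Parse):
//
//   FastLockNode* flock = shared_lock(obj);          // monitorenter
//   ...                                              // synchronized region
//   shared_unlock(map()->peek_monitor_box(),         // monitorexit
//                 map()->peek_monitor_obj());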


//------------------------------shared_unlock----------------------------------
// Emit unlocking code.
void GraphKit::shared_unlock(Node* box, Node* obj) {
  // bci is either a monitorenter bc or InvocationEntryBci
  // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
  assert(SynchronizationEntryBCI == InvocationEntryBci, "");

  if( !GenerateSynchronizationCode )
    return;
  if (stopped()) {               // Dead monitor?
    map()->pop_monitor();        // Kill monitor from debug info
    return;
  }

  // Memory barrier to avoid floating things down past the locked region
  insert_mem_bar(Op_MemBarReleaseLock);

  const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
  UnlockNode *unlock = new UnlockNode(C, tf);
#ifdef ASSERT
  unlock->set_dbg_jvms(sync_jvms());
#endif
  uint raw_idx = Compile::AliasIdxRaw;
  unlock->init_req( TypeFunc::Control, control() );
  unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
  unlock->init_req( TypeFunc::I_O    , top() );     // does no i/o
  unlock->init_req( TypeFunc::FramePtr, frameptr() );
  unlock->init_req( TypeFunc::ReturnAdr, top() );

  unlock->init_req(TypeFunc::Parms + 0, obj);
  unlock->init_req(TypeFunc::Parms + 1, box);
  unlock = _gvn.transform(unlock)->as_Unlock();

  Node* mem = reset_memory();

  // unlock has no side-effects, sets few values
// ...

                                     bool deoptimize_on_exception) {
  int rawidx = Compile::AliasIdxRaw;
  alloc->set_req( TypeFunc::FramePtr, frameptr() );
  add_safepoint_edges(alloc);
  Node* allocx = _gvn.transform(alloc);
  set_control( _gvn.transform(new ProjNode(allocx, TypeFunc::Control) ) );
  // create memory projection for i_o
  set_memory ( _gvn.transform( new ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
  make_slow_call_ex(allocx, env()->Throwable_klass(), true, deoptimize_on_exception);

  // create a memory projection as for the normal control path
  Node* malloc = _gvn.transform(new ProjNode(allocx, TypeFunc::Memory));
  set_memory(malloc, rawidx);

  // a normal slow-call doesn't change i_o, but an allocation does
  // we create a separate i_o projection for the normal control path
  set_i_o(_gvn.transform( new ProjNode(allocx, TypeFunc::I_O, false) ) );
  Node* rawoop = _gvn.transform( new ProjNode(allocx, TypeFunc::Parms) );

  // put in an initialization barrier
  InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
                                                 rawoop)->as_Initialize();
  assert(alloc->initialization() == init,  "2-way macro link must work");
  assert(init ->allocation()     == alloc, "2-way macro link must work");
  {
    // Extract memory strands which may participate in the new object's
    // initialization, and source them from the new InitializeNode.
    // This will allow us to observe initializations when they occur,
    // and link them properly (as a group) to the InitializeNode.
    assert(init->in(InitializeNode::Memory) == malloc, "");
    MergeMemNode* minit_in = MergeMemNode::make(malloc);
    init->set_req(InitializeNode::Memory, minit_in);
    record_for_igvn(minit_in); // fold it up later, if possible
    Node* minit_out = memory(rawidx);
    assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
    if (oop_type->isa_aryptr()) {
      const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
      int elemidx = C->get_alias_index(telemref);
      hook_memory_on_init(*this, elemidx, minit_in, minit_out);
    } else if (oop_type->isa_instptr()) {
      ciInstanceKlass* ik = oop_type->klass()->as_instance_klass();
// ...

  store_to_memory(ctrl, basic_plus_adr(str, coder_offset),
                  value, T_BYTE, coder_field_idx, MemNode::unordered);
}

// Capture src and dst memory state with a MergeMemNode
Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) {
  if (src_type == dst_type) {
    // Types are equal, we don't need a MergeMemNode
    return memory(src_type);
  }
  MergeMemNode* merge = MergeMemNode::make(map()->memory());
  record_for_igvn(merge); // fold it up later, if possible
  int src_idx = C->get_alias_index(src_type);
  int dst_idx = C->get_alias_index(dst_type);
  merge->set_memory_at(src_idx, memory(src_idx));
  merge->set_memory_at(dst_idx, memory(dst_idx));
  return merge;
}

Node* GraphKit::compress_string(Node* src, const TypeAryPtr* src_type, Node* dst, Node* count) {
  assert(Matcher::match_rule_supported(Op_StrCompressedCopy), "Intrinsic not supported");
  assert(src_type == TypeAryPtr::BYTES || src_type == TypeAryPtr::CHARS, "invalid source type");
  // If input and output memory types differ, capture both states to preserve
  // the dependency between preceding and subsequent loads/stores.
  // For example, the following program:
  //   StoreB
  //   compress_string
  //   LoadB
  // has this memory graph (use -> def):
  //   LoadB -> compress_string -> CharMem
  //   ... -> StoreB -> ByteMem
  // The intrinsic hides the dependency between LoadB and StoreB, causing
  // the load to read from memory not containing the result of the StoreB.
  // The correct memory graph should look like this:
  //   LoadB -> compress_string -> MergeMem(CharMem, StoreB(ByteMem))
  Node* mem = capture_memory(src_type, TypeAryPtr::BYTES);
  StrCompressedCopyNode* str = new StrCompressedCopyNode(control(), mem, src, dst, count);
  Node* res_mem = _gvn.transform(new SCMemProjNode(str));
  set_memory(res_mem, TypeAryPtr::BYTES);
  return str;
}

void GraphKit::inflate_string(Node* src, Node* dst, const TypeAryPtr* dst_type, Node* count) {
  assert(Matcher::match_rule_supported(Op_StrInflatedCopy), "Intrinsic not supported");
  assert(dst_type == TypeAryPtr::BYTES || dst_type == TypeAryPtr::CHARS, "invalid dest type");
  // Capture src and dst memory (see comment in 'compress_string').
  Node* mem = capture_memory(TypeAryPtr::BYTES, dst_type);
  StrInflatedCopyNode* str = new StrInflatedCopyNode(control(), mem, src, dst, count);
  set_memory(_gvn.transform(str), dst_type);
}

void GraphKit::inflate_string_slow(Node* src, Node* dst, Node* start, Node* count) {
  /**
   * int i_char = start;
   * for (int i_byte = 0; i_byte < count; i_byte++) {
   *   dst[i_char++] = (char)(src[i_byte] & 0xff);
   * }
   */
  add_predicate();
  RegionNode* head = new RegionNode(3);
  head->init_req(1, control());
  gvn().set_type(head, Type::CONTROL);
  record_for_igvn(head);