  Node* a_e_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p2, tak) );

  // Check (the hard way) and throw if not a subklass.
  // Result is ignored, we just need the CFG effects.
  gen_checkcast( obj, a_e_klass );
}

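//---------------------------emit_guard_for_new--------------------------------
// A 'new' of a klass that is still being initialized may only execute in the
// thread that is running the class initializer; any other thread (or any
// later init state) takes an uncommon trap and re-executes in the interpreter.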
void Parse::emit_guard_for_new(ciInstanceKlass* klass) {
  // Emit guarded new
  //   if (klass->_init_thread != current_thread ||
  //       klass->_init_state != being_initialized)
  //      uncommon_trap
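  //
  // Example (hypothetical classes): if thread T1 is running Foo.<clinit> and
  // that initializer allocates a Foo, the compiled allocation is only valid
  // while T1 is still the initializing thread.  Executed by another thread,
  // or after Foo's init state changes, the guard below fails and we
  // deoptimize via Reason_uninitialized.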
  Node* cur_thread = _gvn.transform( new (C) ThreadLocalNode() );
  Node* merge = new (C) RegionNode(3);
  _gvn.set_type(merge, Type::CONTROL);
  Node* kls = makecon(TypeKlassPtr::make(klass));

  Node* init_thread_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_thread_offset()));
  Node* adr_node = basic_plus_adr(kls, kls, init_thread_offset);
  Node* init_thread = make_load(NULL, adr_node, TypeRawPtr::BOTTOM, T_ADDRESS, false, LoadNode::unordered);
  Node* tst = Bool( CmpP( init_thread, cur_thread), BoolTest::eq);
  IfNode* iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
  set_control(IfTrue(iff));
  merge->set_req(1, IfFalse(iff));

  Node* init_state_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_state_offset()));
  adr_node = basic_plus_adr(kls, kls, init_state_offset);
  // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
  // can generate code to load it as unsigned byte.
  Node* init_state = make_load(NULL, adr_node, TypeInt::UBYTE, T_BOOLEAN, false, LoadNode::unordered);
  Node* being_init = _gvn.intcon(InstanceKlass::being_initialized);
  tst = Bool( CmpI( init_state, being_init), BoolTest::eq);
  iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
  set_control(IfTrue(iff));
  merge->set_req(2, IfFalse(iff));

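  // Both failing paths meet at 'merge'.  Emit the trap there while
  // PreserveJVMState keeps the fast path's JVM state intact for the caller.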
  PreserveJVMState pjvms(this);
  record_for_igvn(merge);
  set_control(merge);

  uncommon_trap(Deoptimization::Reason_uninitialized,
                Deoptimization::Action_reinterpret,
                klass);
}

//------------------------------do_new-----------------------------------------
void Parse::do_new() {
  kill_dead_locals();

  // ... (rest of do_new elided in this excerpt) ...
  }
}

//----------------------increment_and_test_invocation_counter-------------------
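// Bump this method's interpreter invocation counter (kept in MethodCounters).
// Note the order: the current value is tested against 'limit' first (see
// test_counter_against_threshold), then incremented and stored back.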
void Parse::increment_and_test_invocation_counter(int limit) {
  if (!count_invocations()) return;

  // Get the Method* node.
  ciMethod* m = method();
  MethodCounters* counters_adr = m->ensure_method_counters();
  if (counters_adr == NULL) {
    C->record_failure("method counters allocation failed");
    return;
  }

  Node* ctrl = control();
  const TypePtr* adr_type = TypeRawPtr::make((address) counters_adr);
  Node* counters_node = makecon(adr_type);
  Node* adr_iic_node = basic_plus_adr(counters_node, counters_node,
                                      MethodCounters::interpreter_invocation_counter_offset_in_bytes());
  Node* cnt = make_load(ctrl, adr_iic_node, TypeInt::INT, T_INT, adr_type, false, LoadNode::unordered);

  test_counter_against_threshold(cnt, limit);

  // Add one to the counter and store
  Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(1)));
  store_to_memory(ctrl, adr_iic_node, incr, T_INT, adr_type, false, StoreNode::unordered);
}

//----------------------------method_data_addressing---------------------------
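// Compute the in-memory address of a counter or flag inside 'md':
//   mdo + data_offset + cell_offset(data) + counter_offset [+ idx * stride]
// The optional idx/stride pair addresses one row of an indexed profile record.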
Node* Parse::method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  // Get offset within MethodData* of the data array
  ByteSize data_offset = MethodData::data_offset();

  // Get cell offset of the ProfileData within data array
  int cell_offset = md->dp_to_di(data->dp());

  // Add in counter_offset, the # of bytes into the ProfileData of counter or flag
  int offset = in_bytes(data_offset) + cell_offset + in_bytes(counter_offset);

  const TypePtr* adr_type = TypeMetadataPtr::make(md);
  Node* mdo = makecon(adr_type);
  Node* ptr = basic_plus_adr(mdo, mdo, offset);

  if (stride != 0) {
    Node* str = _gvn.MakeConX(stride);
    Node* scale = _gvn.transform( new (C) MulXNode( idx, str ) );
    ptr = _gvn.transform( new (C) AddPNode( mdo, ptr, scale ) );
  }

  return ptr;
}

//--------------------------increment_md_counter_at----------------------------
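// Add DataLayout::counter_increment to a profile counter.  The load/add/store
// sequence is unordered and unsynchronized: concurrent updates can lose
// increments, which is acceptable for profiling data.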
void Parse::increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  Node* adr_node = method_data_addressing(md, data, counter_offset, idx, stride);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, false, LoadNode::unordered);
  Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(DataLayout::counter_increment)));
  store_to_memory(NULL, adr_node, incr, T_INT, adr_type, false, StoreNode::unordered);
}

//--------------------------test_for_osr_md_counter_at-------------------------
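// Load a counter from the profile and test it against 'limit' (see
// test_counter_against_threshold); used at potential OSR sites to decide
// when a loop is hot enough for on-stack replacement.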
void Parse::test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, int limit) {
  Node* adr_node = method_data_addressing(md, data, counter_offset);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, false, LoadNode::unordered);

  test_counter_against_threshold(cnt, limit);
}

//-------------------------------set_md_flag_at--------------------------------
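// OR 'flag_constant' into the flags byte of the ProfileData header.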
void Parse::set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant) {
  Node* adr_node = method_data_addressing(md, data, DataLayout::flags_offset());

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* flags = make_load(NULL, adr_node, TypeInt::BYTE, T_BYTE, adr_type, false, LoadNode::unordered);
  Node* incr = _gvn.transform(new (C) OrINode(flags, _gvn.intcon(flag_constant)));
  store_to_memory(NULL, adr_node, incr, T_BYTE, adr_type, false, StoreNode::unordered);
}

//----------------------------profile_taken_branch-----------------------------
void Parse::profile_taken_branch(int target_bci, bool force_update) {
  // This is a potential osr_site if we have a backedge.
  int cur_bci = bci();
  bool osr_site =
    (target_bci <= cur_bci) && count_invocations() && UseOnStackReplacement;

  // If we are going to OSR, restart at the target bytecode.
  set_bci(target_bci);

  // To do: factor out the limit calculations below. These duplicate
  // the similar limit calculations in the interpreter.

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(cur_bci);
    assert(data->is_JumpData(), "need JumpData for taken branch");