#include "classfile/systemDictionary.hpp"
#include "compiler/compileLog.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "runtime/sharedRuntime.hpp"

//------------------------------make_dtrace_method_entry_exit ----------------
// Dtrace -- record entry or exit of a method if compiled with dtrace support
void GraphKit::make_dtrace_method_entry_exit(ciMethod* method, bool is_entry) {
  const TypeFunc *call_type = OptoRuntime::dtrace_method_entry_exit_Type();
  address call_address = is_entry ? CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry) :
                                    CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit);
  const char *call_name = is_entry ? "dtrace_method_entry" : "dtrace_method_exit";

  // Get base of thread-local storage area
  Node* thread = _gvn.transform( new (C) ThreadLocalNode() );

  // Get method
  const TypePtr* method_type = TypeMetadataPtr::make(method);
  Node *method_node = _gvn.transform( ConNode::make(C, method_type) );

  kill_dead_locals();

  // For some reason, this call reads only raw memory.
  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
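  // RC_LEAF: the callee neither safepoints nor needs a JVM state;
  // RC_NARROW_MEM: the call is assumed to read/write only the memory
  // slice named by raw_adr_type, not the entire memory state.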
  make_runtime_call(RC_LEAF | RC_NARROW_MEM,
                    call_type, call_address,
                    call_name, raw_adr_type,
                    thread, method_node);
}


//=============================================================================
//------------------------------do_checkcast-----------------------------------
void Parse::do_checkcast() {
  bool will_link;
  // ... [elided; the code below continues in Parse::array_store_check(),
  //      where 'obj' is the value being stored and 'p' addresses the
  //      array's klass field] ...

  // p's type is array-of-OOPS plus klass_offset
  Node* array_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS) );
  // Get the array klass
  const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr();

  // array_klass's type is generally INexact array-of-oop. Heroically
  // cast the array klass to EXACT array and uncommon-trap if the cast
  // fails.
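  // In effect, the guard emitted below is:
  //   if (array_klass != <exact array klass constant>)
  //     uncommon_trap(Reason_array_check);
  // after which array_klass may be treated as that exact constant.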
  bool always_see_exact_class = false;
  if (MonomorphicArrayCheck
      && !too_many_traps(Deoptimization::Reason_array_check)) {
    always_see_exact_class = true;
    // (If no MDO at all, hope for the best, until a trap actually occurs.)
  }

  // Is the array klass exactly its defined type?
  if (always_see_exact_class && !tak->klass_is_exact()) {
    // Make a constant out of the inexact array klass
    const TypeKlassPtr *extak = tak->cast_to_exactness(true)->is_klassptr();
    Node* con = makecon(extak);
    Node* cmp = _gvn.transform(new (C) CmpPNode( array_klass, con ));
    Node* bol = _gvn.transform(new (C) BoolNode( cmp, BoolTest::eq ));
    Node* ctrl= control();
    { BuildCutout unless(this, bol, PROB_MAX);
      uncommon_trap(Deoptimization::Reason_array_check,
                    Deoptimization::Action_maybe_recompile,
                    tak->klass());
    }
    if (stopped()) {        // MUST uncommon-trap?
      set_control(ctrl);    // Then Don't Do It, just fall into the normal checking
    } else {                // Cast array klass to exactness:
      // Use the exact constant value we know it is.
      replace_in_map(array_klass, con);
      CompileLog* log = C->log();
      if (log != NULL) {
        log->elem("cast_up reason='monomorphic_array' from='%d' to='(exact)'",
                  log->identify(tak->klass()));
      }
      array_klass = con;    // Use cast value moving forward
    }
  }

  // Come here for polymorphic array klasses

  // Extract the array element class
  int element_klass_offset = in_bytes(ObjArrayKlass::element_klass_offset());
  Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
  Node *a_e_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p2, tak) );

  // Check (the hard way) and throw if not a subklass.
  // Result is ignored, we just need the CFG effects.
  gen_checkcast( obj, a_e_klass );
}


void Parse::emit_guard_for_new(ciInstanceKlass* klass) {
  // Emit guarded new
  //   if (klass->_init_thread != current_thread ||
  //       klass->_init_state != being_initialized)
  //      uncommon_trap
  Node* cur_thread = _gvn.transform( new (C) ThreadLocalNode() );
  Node* merge = new (C) RegionNode(3);
  _gvn.set_type(merge, Type::CONTROL);
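  // 'merge' collects the failing projections of the two guards below; in the
  // elided continuation it becomes the control input of an uncommon trap
  // (taken when the class is not being initialized by this thread).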
  Node* kls = makecon(TypeKlassPtr::make(klass));

  Node* init_thread_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_thread_offset()));
  Node* adr_node = basic_plus_adr(kls, kls, init_thread_offset);
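  // _init_thread is a raw Thread*, so it is loaded as T_ADDRESS through the
  // raw-memory slice.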
  Node* init_thread = make_load(NULL, adr_node, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
  Node *tst = Bool( CmpP( init_thread, cur_thread), BoolTest::eq);
  IfNode* iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
  set_control(IfTrue(iff));
  merge->set_req(1, IfFalse(iff));

  Node* init_state_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_state_offset()));
  adr_node = basic_plus_adr(kls, kls, init_state_offset);
  // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
  // can generate code to load it as unsigned byte.
  Node* init_state = make_load(NULL, adr_node, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
  Node* being_init = _gvn.intcon(InstanceKlass::being_initialized);
  tst = Bool( CmpI( init_state, being_init), BoolTest::eq);
  iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
  set_control(IfTrue(iff));
  // ... [elided] ...
  }
}

#endif


//=============================================================================
//
// parser methods for profiling


//----------------------test_counter_against_threshold ------------------------
void Parse::test_counter_against_threshold(Node* cnt, int limit) {
  // Test the counter against the limit and uncommon trap if greater.

  // This code is largely copied from the range check code in
  // array_addressing()

  // Test invocation count vs threshold
  Node *threshold = makecon(TypeInt::make(limit));
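  // Unsigned compare: a counter that has wrapped negative reads as a huge
  // unsigned value, so it too falls into the cutout below.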
  Node *chk = _gvn.transform( new (C) CmpUNode( cnt, threshold) );
  BoolTest::mask btest = BoolTest::lt;
  Node *tst = _gvn.transform( new (C) BoolNode( chk, btest) );
  // Branch to failure if threshold exceeded
  { BuildCutout unless(this, tst, PROB_ALWAYS);
    uncommon_trap(Deoptimization::Reason_age,
                  Deoptimization::Action_maybe_recompile);
  }
}

//----------------------increment_and_test_invocation_counter-------------------
void Parse::increment_and_test_invocation_counter(int limit) {
  if (!count_invocations()) return;

  // Get the Method* node.
  ciMethod* m = method();
  MethodCounters* counters_adr = m->ensure_method_counters();
  if (counters_adr == NULL) {
    C->record_failure("method counters allocation failed");
    return;
  }

  Node* ctrl = control();
  const TypePtr* adr_type = TypeRawPtr::make((address) counters_adr);
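  // The MethodCounters block lives as long as the Method itself, so its
  // address can be burned into the compiled code as a raw-pointer constant.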
  Node *counters_node = makecon(adr_type);
  Node* adr_iic_node = basic_plus_adr(counters_node, counters_node,
    MethodCounters::interpreter_invocation_counter_offset_in_bytes());
  Node* cnt = make_load(ctrl, adr_iic_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);

  test_counter_against_threshold(cnt, limit);

  // Add one to the counter and store
  Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(1)));
  store_to_memory(ctrl, adr_iic_node, incr, T_INT, adr_type, MemNode::unordered);
}

//----------------------------method_data_addressing---------------------------
Node* Parse::method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  // Get offset within MethodData* of the data array
  ByteSize data_offset = MethodData::data_offset();

  // Get cell offset of the ProfileData within data array
  int cell_offset = md->dp_to_di(data->dp());

  // Add in counter_offset, the # of bytes into the ProfileData of counter or flag
  int offset = in_bytes(data_offset) + cell_offset + in_bytes(counter_offset);
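  // The full byte offset from the MethodData* is thus:
  //   data_offset + dp_to_di(data->dp()) + counter_offset  (+ idx*stride below)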

  const TypePtr* adr_type = TypeMetadataPtr::make(md);
  Node* mdo = makecon(adr_type);
  Node* ptr = basic_plus_adr(mdo, mdo, offset);

  if (stride != 0) {
    Node* str = _gvn.MakeConX(stride);
    Node* scale = _gvn.transform( new (C) MulXNode( idx, str ) );
    ptr = _gvn.transform( new (C) AddPNode( mdo, ptr, scale ) );
  }

  return ptr;
}

//--------------------------increment_md_counter_at----------------------------
void Parse::increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  Node* adr_node = method_data_addressing(md, data, counter_offset, idx, stride);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
  Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(DataLayout::counter_increment)));
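  // Racy read-modify-write: profile counters tolerate lost updates under
  // contention, so no atomic update is needed here.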
  store_to_memory(NULL, adr_node, incr, T_INT, adr_type, MemNode::unordered);
}

//--------------------------test_for_osr_md_counter_at-------------------------
void Parse::test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, int limit) {
  Node* adr_node = method_data_addressing(md, data, counter_offset);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);

  test_counter_against_threshold(cnt, limit);
}

//-------------------------------set_md_flag_at--------------------------------
void Parse::set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant) {
  Node* adr_node = method_data_addressing(md, data, DataLayout::flags_offset());

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
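  // The DataLayout flags field is a single byte in the header, hence the
  // byte-wide load / OR / store sequence.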
  Node* flags = make_load(NULL, adr_node, TypeInt::BYTE, T_BYTE, adr_type, MemNode::unordered);
  Node* incr = _gvn.transform(new (C) OrINode(flags, _gvn.intcon(flag_constant)));
  store_to_memory(NULL, adr_node, incr, T_BYTE, adr_type, MemNode::unordered);
}

//----------------------------profile_taken_branch-----------------------------
void Parse::profile_taken_branch(int target_bci, bool force_update) {
  // This is a potential osr_site if we have a backedge.
  int cur_bci = bci();
  bool osr_site =
      (target_bci <= cur_bci) && count_invocations() && UseOnStackReplacement;

  // If we are going to OSR, restart at the target bytecode.
  set_bci(target_bci);

  // To do: factor out the limit calculations below. These duplicate
  // the similar limit calculations in the interpreter.

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(cur_bci);