}
static inline void add_one_req(Node* dstphi, Node* src) {
  assert(is_hidden_merge(dstphi), "must be a special merge node");
  assert(!is_hidden_merge(src), "must not be a special merge node");
  dstphi->add_req(src);
}

//-----------------------combine_exception_states------------------------------
// This helper function combines exception states by building phis on a
// specially marked state-merging region.  These regions and phis are
// untransformed, and can build up gradually.  The region is marked by
// having a control input of its exception map, rather than NULL.  Such
// regions do not appear except in this function, and in use_exception_state.
void GraphKit::combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map) {
  if (failing())  return;  // dying anyway...
  JVMState* ex_jvms = ex_map->_jvms;
  assert(ex_jvms->same_calls_as(phi_map->_jvms), "consistent call chains");
  assert(ex_jvms->stkoff() == phi_map->_jvms->stkoff(), "matching locals");
  assert(ex_jvms->sp()     == phi_map->_jvms->sp(),     "matching stack sizes");
  assert(ex_jvms->monoff() == phi_map->_jvms->monoff(), "matching JVMS");
  assert(ex_jvms->scloff() == phi_map->_jvms->scloff(), "matching scalar replaced objects");
  assert(ex_map->req() == phi_map->req(), "matching maps");
  uint tos = ex_jvms->stkoff() + ex_jvms->sp();
  Node* hidden_merge_mark = root();
  Node* region = phi_map->control();
  MergeMemNode* phi_mem = phi_map->merged_memory();
  MergeMemNode* ex_mem  = ex_map->merged_memory();
  if (region->in(0) != hidden_merge_mark) {
    // The control input is not (yet) a specially-marked region in phi_map.
    // Make it so, and build some phis.
    region = new (C) RegionNode(2);
    _gvn.set_type(region, Type::CONTROL);
    region->set_req(0, hidden_merge_mark); // marks an internal ex-state
    region->init_req(1, phi_map->control());
    phi_map->set_control(region);
    Node* io_phi = PhiNode::make(region, phi_map->i_o(), Type::ABIO);
    record_for_igvn(io_phi);
    _gvn.set_type(io_phi, Type::ABIO);
    phi_map->set_i_o(io_phi);
    for (MergeMemStream mms(phi_mem); mms.next_non_empty(); ) {
      Node* m = mms.memory();
      // Build a phi for each live memory slice, so later states can merge in.
      Node* m_phi = PhiNode::make(region, m, Type::MEMORY, mms.adr_type(C));
      record_for_igvn(m_phi);
      _gvn.set_type(m_phi, Type::MEMORY);
      mms.set_memory(m_phi);
    }
  }

  // Either or both of phi_map and ex_map might already be converted into phis.
  Node* ex_control = ex_map->control();
  // If there is special marking on ex_map also, we add multiple edges from src.
  bool add_multiple = (ex_control->in(0) == hidden_merge_mark);
  // How wide was the destination phi_map, originally?
  uint orig_width = region->req();

  if (add_multiple) {
    add_n_reqs(region, ex_control);
    add_n_reqs(phi_map->i_o(), ex_map->i_o());
  } else {
    // ex_map has no merges, so we just add single edges everywhere.
    add_one_req(region, ex_control);
    add_one_req(phi_map->i_o(), ex_map->i_o());
  }
  for (MergeMemStream mms(phi_mem, ex_mem); mms.next_non_empty2(); ) {
    if (mms.is_empty()) {
      // Get a copy of the base memory, and patch some inputs into it.
      const TypePtr* adr_type = mms.adr_type(C);
      Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
      mms.set_memory(phi);
      // Prepare to append interesting stuff onto the newly sliced phi:
      while (phi->req() > orig_width)  phi->del_req(phi->req()-1);
    }
    // Append stuff from ex_map:
    if (add_multiple) {
      add_n_reqs(mms.memory(), mms.memory2());
    } else {
      add_one_req(mms.memory(), mms.memory2());
    }
  }
  uint limit = ex_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip everything in the JVMS after tos.  (The ex_oop follows.)
    if (i == tos)  i = ex_jvms->monoff();
    Node* src = ex_map->in(i);
    Node* dst = phi_map->in(i);
    if (src != dst) {
      PhiNode* phi;
      if (dst->in(0) != region) {
        dst = phi = PhiNode::make(region, dst, _gvn.type(dst));
        record_for_igvn(phi);
        _gvn.set_type(phi, phi->type());
        phi_map->set_req(i, dst);
        // Prepare to append interesting stuff onto the new phi:
        while (dst->req() > orig_width)  dst->del_req(dst->req()-1);
      } else {
        assert(dst->is_Phi(), "nobody else uses a hidden region");
        phi = dst->as_Phi();
      }
      if (add_multiple && src->in(0) == ex_control) {
        // Both are phis.
        add_n_reqs(dst, src);
      } else {
        while (dst->req() < region->req())  add_one_req(dst, src);
      }
      const Type* srctype = _gvn.type(src);
      if (phi->type() != srctype) {
        const Type* dsttype = phi->type()->meet(srctype);
        if (phi->type() != dsttype) {
          phi->set_type(dsttype);
          _gvn.set_type(phi, dsttype);
        }
      }
    }
  }
}

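// Usage sketch (not part of the original file): callers typically fold every
// pending exception state into a single phi_map, in the spirit of the
// combine_and_pop_all_exception_states() helper declared in graphKit.hpp.
// A minimal sketch, assuming the standard pop_exception_state() accessor:
//
//   SafePointNode* phi_map = pop_exception_state();   // first state, if any
//   SafePointNode* ex_map;
//   while ((ex_map = pop_exception_state()) != NULL) {
//     combine_exception_states(ex_map, phi_map);      // merge into phi_map
//   }
//   // phi_map now carries one merged exception state (or is NULL).
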
//--------------------------use_exception_state--------------------------------
// ... (use_exception_state and intervening code elided) ...

// parser factory methods for MemNodes
//
// These are layered on top of the factory methods in LoadNode and StoreNode,
// and integrate with the parser's memory state and _gvn engine.
//

// factory methods in "int adr_idx"
Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                          int adr_idx,
                          bool require_atomic_access) {
  assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory");
  const TypePtr* adr_type = NULL; // debug-mode-only argument
  debug_only(adr_type = C->get_adr_type(adr_idx));
  Node* mem = memory(adr_idx);
  Node* ld;
  if (require_atomic_access && bt == T_LONG) {
    ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t);
  } else {
    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt);
  }
  ld = _gvn.transform(ld);
  if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_autobox()) {
    // Improve graph before escape analysis and boxing elimination.
    record_for_igvn(ld);
  }
  return ld;
}

Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                                int adr_idx,
                                bool require_atomic_access) {
  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
  const TypePtr* adr_type = NULL;
  debug_only(adr_type = C->get_adr_type(adr_idx));
  Node* mem = memory(adr_idx);
  Node* st;
  if (require_atomic_access && bt == T_LONG) {
    st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val);
  } else {
    st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt);
  }
  st = _gvn.transform(st);
  set_memory(st, adr_idx);
  // Back-to-back stores can only remove the intermediate store with DU info,
  // so push on the worklist for the optimizer.
  if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
    record_for_igvn(st);

  return st;
}
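
// Usage sketch (illustrative only): a typical parser sequence that loads and
// stores a 32-bit field through these factories.  The receiver node 'obj',
// the field 'offset', and 'adr_type' are hypothetical; alias classes come
// from Compile as usual.
//
//   const TypePtr* adr_type = ...;                    // alias type of the field
//   int   adr_idx = C->get_alias_index(adr_type);
//   Node* adr     = basic_plus_adr(obj, obj, offset); // obj + offset
//   Node* val     = make_load(control(), adr, TypeInt::INT, T_INT, adr_idx, false);
//   store_to_memory(control(), adr, val, T_INT, adr_idx, false);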

// ... (intervening code and the head of GraphKit::new_instance elided;
//      what follows is its tail) ...

  if (return_size_val != NULL) {
    (*return_size_val) = size;
  }

  // This is a precise notnull oop of the klass.
  // (Actually, it need not be precise if this is a reflective allocation.)
  // It's what we cast the result to.
  const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
  if (!tklass)  tklass = TypeKlassPtr::OBJECT;
  const TypeOopPtr* oop_type = tklass->as_instance_type();

  // Now generate allocation code

  // The entire memory state is needed for the slow path of the allocation
  // since GC and deoptimization can happen.
  Node* mem = reset_memory();
  set_all_memory(mem); // Create new memory state

  AllocateNode* alloc
    = new (C) AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
                           control(), mem, i_o(),
                           size, klass_node,
                           initial_slow_test);

  return set_output_for_allocation(alloc, oop_type);
}

//-------------------------------new_array-------------------------------------
// helper for both newarray and anewarray
// The 'length' parameter is (obviously) the length of the array.
// See comments on new_instance for the meaning of the other arguments.
Node* GraphKit::new_array(Node* klass_node,     // array klass (maybe variable)
                          Node* length,         // number of array elements
                          int   nargs,          // number of arguments to push back for uncommon trap
                          Node* *return_size_val) {
  jint  layout_con = Klass::_lh_neutral_value;
  Node* layout_val = get_layout_helper(klass_node, layout_con);
  int   layout_is_con = (layout_val == NULL);

  if (!layout_is_con && !StressReflectiveCode &&
      !too_many_traps(Deoptimization::Reason_class_check)) {
    // ... (optimistic reflective-array path elided) ...
  }
  // ... (header and body size computation elided; it defines 'round_mask',
  //      'size', and 'initial_slow_test' used below) ...
  if (round_mask != 0) {
    Node* mask = MakeConX(~round_mask);
    size = _gvn.transform( new (C) AndXNode(size, mask) );
  }
  // else if round_mask == 0, the size computation is self-rounding

  if (return_size_val != NULL) {
    // This is the size
    (*return_size_val) = size;
  }

  // Now generate allocation code

  // The entire memory state is needed for the slow path of the allocation
  // since GC and deoptimization can happen.
  Node* mem = reset_memory();
  set_all_memory(mem); // Create new memory state

  // Create the AllocateArrayNode and its result projections
  AllocateArrayNode* alloc
    = new (C) AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
                                control(), mem, i_o(),
                                size, klass_node,
                                initial_slow_test,
                                length);

  // Cast to correct type.  Note that the klass_node may be constant or not,
  // and in the latter case the actual array type will be inexact also.
  // (This happens via a non-constant argument to inline_native_newArray.)
  // In any case, the value of klass_node provides the desired array type.
  const TypeInt* length_type = _gvn.find_int_type(length);
  const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
  if (ary_type->isa_aryptr() && length_type != NULL) {
    // Try to get a better type than POS for the size
    ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
  }

  Node* javaoop = set_output_for_allocation(alloc, ary_type);

  // Cast length on remaining path to be as narrow as possible
  if (map()->find_edge(length) >= 0) {
    Node* ccast = alloc->make_ideal_length(ary_type, &_gvn);
    if (ccast != length) {
      _gvn.set_type_bottom(ccast);
      record_for_igvn(ccast);
      replace_in_map(length, ccast);
    }
  }

  return javaoop;
}

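// Usage sketch (illustrative only): the 'newarray' bytecode lands here roughly
// as in Parse::do_newarray, with a constant type-array klass and the length
// popped from the JVM stack:
//
//   const TypeKlassPtr* array_klass =
//       TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
//   Node* count_val = pop();                      // array length from stack
//   Node* obj = new_array(makecon(array_klass), count_val, 1 /*nargs*/);
//   push(obj);                                    // resultant oop
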
// The following "Ideal_foo" functions are placed here because they recognize
// the graph shapes created by the functions immediately above.

//---------------------------Ideal_allocation----------------------------------
// Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
  if (ptr == NULL) {     // reduce dumb test in callers
    return NULL;
  }
  ptr = ptr->uncast();   // strip a raw-to-oop cast
  if (ptr == NULL)  return NULL;

  if (ptr->is_Proj()) {
    Node* allo = ptr->in(0);
    if (allo != NULL && allo->is_Allocate()) {
      return allo->as_Allocate();
    }
  }
  // Report failure to match.
  return NULL;
}

// Fancy version which also strips off an offset (and reports it to caller).
AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                             intptr_t& offset) {
  Node* base = AddPNode::Ideal_base_and_offset(ptr, phase, offset);
  if (base == NULL)  return NULL;
  return Ideal_allocation(base, phase);
}

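// Usage sketch (illustrative only): optimizations that want to know whether a
// store address points into a freshly allocated object can ask for the
// allocation behind it, recovering the constant offset at the same time:
//
//   intptr_t offset;
//   Node* adr = st->in(MemNode::Address);         // 'st' is some store node
//   AllocateNode* alloc = AllocateNode::Ideal_allocation(adr, phase, offset);
//   if (alloc != NULL) { /* store is into 'alloc', at 'offset' */ }
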
// Trace Initialize <- Proj[Parm] <- Allocate
AllocateNode* InitializeNode::allocation() {
  Node* rawoop = in(InitializeNode::RawAddress);
  if (rawoop->is_Proj()) {
    Node* alloc = rawoop->in(0);
    if (alloc->is_Allocate()) {
      return alloc->as_Allocate();
    }
  }
  return NULL;
}

// Trace Allocate -> Proj[Parm] -> Initialize
InitializeNode* AllocateNode::initialization() {
  ProjNode* rawoop = proj_out(AllocateNode::RawAddress);
  if (rawoop == NULL)  return NULL;
  for (DUIterator_Fast imax, i = rawoop->fast_outs(imax); i < imax; i++) {
    Node* init = rawoop->fast_out(i);
    if (init->is_Initialize()) {
      assert(init->as_Initialize()->allocation() == this, "2-way link");
      return init->as_Initialize();
    }
  }
  return NULL;
}

// Trace Allocate -> Proj[Parm] -> MemBarStoreStore
MemBarStoreStoreNode* AllocateNode::storestore() {
  ProjNode* rawoop = proj_out(AllocateNode::RawAddress);
  if (rawoop == NULL)  return NULL;
  for (DUIterator_Fast imax, i = rawoop->fast_outs(imax); i < imax; i++) {
    Node* storestore = rawoop->fast_out(i);
    if (storestore->is_MemBarStoreStore()) {
      return storestore->as_MemBarStoreStore();
    }
  }
  return NULL;
}

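// Usage sketch (illustrative only): the two tracing functions above are
// inverses, so given an AllocateNode one can hop to its InitializeNode and
// back, relying on the asserted 2-way link:
//
//   InitializeNode* init = alloc->initialization();
//   if (init != NULL) {
//     assert(init->allocation() == alloc, "round trip");
//   }
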
//----------------------------- loop predicates ---------------------------

//------------------------------add_predicate_impl----------------------------
void GraphKit::add_predicate_impl(Deoptimization::DeoptReason reason, int nargs) {
  // Too many traps seen?
  if (too_many_traps(reason)) {
#ifdef ASSERT
    if (TraceLoopPredicate) {
      int tc = C->trap_count(reason);
      tty->print("too many traps=%s tcount=%d in ",
                 Deoptimization::trap_reason_name(reason), tc);
      method()->print(); // which method has too many predicate traps
      tty->cr();
    }
#endif
    // We cannot afford to take more traps here,
    // so do not generate a predicate.
    return;
  }

  // ... (predicate construction elided) ...