
src/hotspot/share/opto/parse2.cpp (old version)


  56 void Parse::array_load(BasicType bt) {
  57   const Type* elemtype = Type::TOP;
  58   Node* adr = array_addressing(bt, 0, &elemtype);
  59   if (stopped())  return;     // guaranteed null or range check
  60 
  61   Node* idx = pop();
  62   Node* ary = pop();
  63 
  64   // Handle value type arrays
  65   const TypeOopPtr* elemptr = elemtype->make_oopptr();
  66   const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
  67   if (elemtype->isa_valuetype() != NULL) {
  68     C->set_flattened_accesses();
  69     // Load from flattened value type array
  70     Node* vt = ValueTypeNode::make_from_flattened(this, elemtype->value_klass(), ary, adr);
  71     push(vt);
  72     return;
  73   } else if (elemptr != NULL && elemptr->is_valuetypeptr() && !elemptr->maybe_null()) {
  74     // Load from non-flattened but flattenable value type array (elements can never be null)
  75     bt = T_VALUETYPE;
  76   } else if (ValueArrayFlatten && elemptr != NULL && elemptr->can_be_value_type() &&
  77              !ary_t->klass_is_exact() && (!elemptr->is_valuetypeptr() || elemptr->value_klass()->flatten_array())) {
  78     // Cannot statically determine if array is flattened, emit runtime check


  79     Node* ctl = control();
  80     IdealKit ideal(this);
  81     IdealVariable res(ideal);
  82     ideal.declarations_done();
  83     Node* kls = load_object_klass(ary);
  84     Node* tag = load_lh_array_tag(kls);
  85     ideal.if_then(tag, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value)); {
  86       // non-flattened
  87       sync_kit(ideal);
  88       const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
  89       Node* ld = access_load_at(ary, adr, adr_type, elemptr, bt,
  90                                 IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD, ctl);
  91       ideal.sync_kit(this);
  92       ideal.set(res, ld);
  93     } ideal.else_(); {
  94       // flattened
  95       sync_kit(ideal);
  96       if (elemptr->is_valuetypeptr()) {
  97         // Element type is known, cast and load from flattened representation
  98         assert(elemptr->maybe_null(), "must be nullable");
  99         ciValueKlass* vk = elemptr->value_klass();
 100         assert(vk->flatten_array(), "must be flattenable");
 101         ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* never_null */ true);
 102         const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
 103         Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, arytype));
 104         adr = array_element_address(cast, idx, T_VALUETYPE, ary_t->size(), control());
 105         Node* vt = ValueTypeNode::make_from_flattened(this, vk, cast, adr)->allocate(this, false, false)->get_oop();
 106         ideal.set(res, vt);
 107         ideal.sync_kit(this);
 108       } else {
 109         // Element type is unknown, emit runtime call
 110         assert(!ary_t->klass_is_exact(), "should not have exact type here");
 111         Node* k_adr = basic_plus_adr(kls, in_bytes(ArrayKlass::element_klass_offset()));
 112         Node* elem_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
 113         Node* obj_size  = NULL;
 114         kill_dead_locals();
 115         inc_sp(2);
 116         Node* alloc_obj = new_instance(elem_klass, NULL, &obj_size, /*deoptimize_on_exception=*/true);
 117         dec_sp(2);
 118 
 119         AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
 120         assert(alloc->maybe_set_complete(&_gvn), "");
 121         alloc->initialization()->set_complete_with_arraycopy();
 122 
 123         // This membar keeps this access to an unknown flattened array
 124         // correctly ordered with other unknown and known flattened
 125         // array accesses.
 126         insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::VALUES));
 127 
 128         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 129         // Unknown value type might contain reference fields
 130         if (!bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, BarrierSetC2::Parsing)) {
      [... lines 131-197 elided ...]
 198 }
 199 
 200 
 201 //--------------------------------array_store----------------------------------
 202 void Parse::array_store(BasicType bt) {
 203   const Type* elemtype = Type::TOP;
 204   Node* adr = array_addressing(bt, type2size[bt], &elemtype);
 205   if (stopped())  return;     // guaranteed null or range check
 206   Node* cast_val = NULL;
 207   if (bt == T_OBJECT) {
 208     cast_val = array_store_check();
 209     if (stopped()) return;
 210   }
 211   Node* val = pop_node(bt); // Value to store
 212   Node* idx = pop();        // Index in the array
 213   Node* ary = pop();        // The array itself
 214 
 215   const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
 216   if (bt == T_OBJECT) {
 217     const TypeOopPtr* elemptr = elemtype->make_oopptr();
 218     const Type* val_t = _gvn.type(val);
 219     if (elemtype->isa_valuetype() != NULL) {
 220       C->set_flattened_accesses();
 221       // Store to flattened value type array
 222       if (!cast_val->is_ValueType()) {
 223         inc_sp(3);
 224         cast_val = null_check(cast_val);
 225         if (stopped()) return;
 226         dec_sp(3);
 227         cast_val = ValueTypeNode::make_from_oop(this, cast_val, elemtype->value_klass());
 228       }
 229       cast_val->as_ValueType()->store_flattened(this, ary, adr);
 230       return;
 231     } else if (elemptr->is_valuetypeptr() && !elemptr->maybe_null()) {
 232       // Store to non-flattened but flattenable value type array (elements can never be null)
 233       if (!cast_val->is_ValueType()) {
 234         inc_sp(3);
 235         cast_val = null_check(cast_val);
 236         if (stopped()) return;
 237         dec_sp(3);
 238       }
 239     } else if (elemptr->can_be_value_type() && (!ary_t->klass_is_exact() || elemptr->is_valuetypeptr()) &&
 240                (val->is_ValueType() || val_t == TypePtr::NULL_PTR || val_t->is_oopptr()->can_be_value_type())) {
 241       // Cannot statically determine if array is flattened, emit runtime check
 242       ciValueKlass* vk = NULL;
 243       // Try to determine the value klass
 244       if (val->is_ValueType()) {
 245         vk = val_t->value_klass();
 246       } else if (elemptr->is_valuetypeptr()) {
 247         vk = elemptr->value_klass();
 248       }
 249       if (ValueArrayFlatten && (vk == NULL || vk->flatten_array())) {


 250         IdealKit ideal(this);
 251         Node* kls = load_object_klass(ary);
 252         Node* layout_val = load_lh_array_tag(kls);
 253         ideal.if_then(layout_val, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value)); {

 254           // non-flattened
 255           sync_kit(ideal);
 256           gen_value_array_null_guard(ary, val, 3);
 257           const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
 258           elemtype = ary_t->elem()->make_oopptr();
 259           access_store_at(ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY, false, false);
 260           ideal.sync_kit(this);
 261         } ideal.else_(); {


 262           // flattened
 263           if (!val->is_ValueType() && TypePtr::NULL_PTR->higher_equal(val_t)) {
 264             // Add null check
 265             sync_kit(ideal);
 266             Node* null_ctl = top();
 267             val = null_check_oop(val, &null_ctl);
 268             if (null_ctl != top()) {
 269               PreserveJVMState pjvms(this);
 270               inc_sp(3);
 271               set_control(null_ctl);
 272               uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
 273               dec_sp(3);
 274             }
 275             ideal.sync_kit(this);
 276           }
 277           if (vk != NULL && !stopped()) {
 278             // Element type is known, cast and store to flattened representation
 279             sync_kit(ideal);
 280             assert(vk->flatten_array(), "must be flattenable");
 281             assert(elemptr->maybe_null(), "must be nullable");
 282             ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* never_null */ true);
 283             const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
 284             ary = _gvn.transform(new CheckCastPPNode(control(), ary, arytype));
 285             adr = array_element_address(ary, idx, T_OBJECT, arytype->size(), control());
 286             if (!val->is_ValueType()) {
 287               assert(!gvn().type(val)->maybe_null(), "value type array elements should never be null");
 288               val = ValueTypeNode::make_from_oop(this, val, vk);
 289             }
 290             val->as_ValueType()->store_flattened(this, ary, adr);
 291             ideal.sync_kit(this);
 292           } else if (!ideal.ctrl()->is_top()) {
 293             // Element type is unknown, emit runtime call
 294             assert(!ary_t->klass_is_exact(), "should not have exact type here");
 295             sync_kit(ideal);
 296 
 297             // This membar keeps this access to an unknown flattened
 298             // array correctly ordered with other unknown and known
 299             // flattened array accesses.
 300             insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::VALUES));
 301             ideal.sync_kit(this);
 302 
 303             ideal.make_leaf_call(OptoRuntime::store_unknown_value_Type(),
 304                                  CAST_FROM_FN_PTR(address, OptoRuntime::store_unknown_value),
 305                                  "store_unknown_value",
 306                                  val, ary, idx);
 307 
 308             sync_kit(ideal);
 309             // Same as MemBarCPUOrder above: keep this unknown
 310             // flattened array access correctly ordered with other
 311             // flattened array accesses
 312             insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::VALUES));
 313             ideal.sync_kit(this);
 314 
 315           }
 316         } ideal.end_if();

 317         sync_kit(ideal);
 318         return;
 319       } else {
 320         gen_value_array_null_guard(ary, val, 3);

 321       }
 322     }
 323   }
 324 
 325   if (elemtype == TypeInt::BOOL) {
 326     bt = T_BOOLEAN;
 327   } else if (bt == T_OBJECT) {
 328     elemtype = ary_t->elem()->make_oopptr();
 329   }
 330 
 331   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
 332 
 333   access_store_at(ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
 334 }
 335 
 336 
 337 //------------------------------array_addressing-------------------------------
 338 // Pull array and index from the stack.  Compute pointer-to-element.
 339 Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
 340   Node *idx   = peek(0+vals);   // Get from stack without popping
      [... lines 341-411 elided ...]
 412       set_control(_gvn.transform(new IfFalseNode(rc)));
 413       if (C->allow_range_check_smearing()) {
 414         // Do not use builtin_throw, since range checks are sometimes
 415         // made more stringent by an optimistic transformation.
 416         // This creates "tentative" range checks at this point,
 417         // which are not guaranteed to throw exceptions.
 418         // See IfNode::Ideal, is_range_check, adjust_check.
 419         uncommon_trap(Deoptimization::Reason_range_check,
 420                       Deoptimization::Action_make_not_entrant,
 421                       NULL, "range_check");
 422       } else {
 423         // If we have already recompiled with the range-check-widening
 424         // heroic optimization turned off, then we must really be throwing
 425         // range check exceptions.
 426         builtin_throw(Deoptimization::Reason_range_check, idx);
 427       }
 428     }
 429   }
 430   // Check for always knowing you are throwing a range-check exception
 431   if (stopped())  return top();

 432 
 433   // Make array address computation control dependent to prevent it
 434   // from floating above the range check during loop optimizations.
 435   Node* ptr = array_element_address(ary, idx, type, sizetype, control());
 436 
 437   if (result2 != NULL)  *result2 = elemtype;
 438 
 439   assert(ptr != top(), "top should go hand-in-hand with stopped");
 440 
 441   return ptr;
 442 }
 443 
 444 
 445 // returns IfNode
 446 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
 447   Node   *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
 448   Node   *tst = _gvn.transform(new BoolNode(cmp, mask));
 449   IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
 450   return iff;
 451 }

src/hotspot/share/opto/parse2.cpp (new version)

  56 void Parse::array_load(BasicType bt) {
  57   const Type* elemtype = Type::TOP;
  58   Node* adr = array_addressing(bt, 0, &elemtype);
  59   if (stopped())  return;     // guaranteed null or range check
  60 
  61   Node* idx = pop();
  62   Node* ary = pop();
  63 
  64   // Handle value type arrays
  65   const TypeOopPtr* elemptr = elemtype->make_oopptr();
  66   const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
  67   if (elemtype->isa_valuetype() != NULL) {
  68     C->set_flattened_accesses();
  69     // Load from flattened value type array
  70     Node* vt = ValueTypeNode::make_from_flattened(this, elemtype->value_klass(), ary, adr);
  71     push(vt);
  72     return;
  73   } else if (elemptr != NULL && elemptr->is_valuetypeptr() && !elemptr->maybe_null()) {
  74     // Load from non-flattened but flattenable value type array (elements can never be null)
  75     bt = T_VALUETYPE;
  76   } else if (!ary_t->is_not_flat() && !ary_t->klass_is_exact()) {

  77     // Cannot statically determine if array is flattened, emit runtime check
  78     assert(ValueArrayFlatten && elemptr != NULL && elemptr->can_be_value_type() &&
  79            (!elemptr->is_valuetypeptr() || elemptr->value_klass()->flatten_array()), "array can't be flattened");
  80     Node* ctl = control();
  81     IdealKit ideal(this);
  82     IdealVariable res(ideal);
  83     ideal.declarations_done();
  84     Node* kls = load_object_klass(ary);
  85     Node* tag = load_lh_array_tag(kls);
  86     ideal.if_then(tag, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value)); {
  87       // non-flattened
  88       sync_kit(ideal);
  89       const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
  90       Node* ld = access_load_at(ary, adr, adr_type, elemptr, bt,
  91                                 IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD, ctl);
  92       ideal.sync_kit(this);
  93       ideal.set(res, ld);
  94     } ideal.else_(); {
  95       // flattened
  96       sync_kit(ideal);
  97       if (elemptr->is_valuetypeptr()) {
  98         // Element type is known, cast and load from flattened representation
  99         assert(elemptr->maybe_null(), "must be nullable");
 100         ciValueKlass* vk = elemptr->value_klass();
 101         assert(vk->flatten_array(), "must be flattenable");
 102         ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* never_null */ true);
 103         const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
 104         Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, arytype));
 105         adr = array_element_address(cast, idx, T_VALUETYPE, ary_t->size(), control());
 106         Node* vt = ValueTypeNode::make_from_flattened(this, vk, cast, adr)->allocate(this, false, false)->get_oop();
 107         ideal.set(res, vt);
 108         ideal.sync_kit(this);
 109       } else {
 110         // Element type is unknown, emit runtime call

 111         Node* k_adr = basic_plus_adr(kls, in_bytes(ArrayKlass::element_klass_offset()));
 112         Node* elem_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
 113         Node* obj_size  = NULL;
 114         kill_dead_locals();
 115         inc_sp(2);
 116         Node* alloc_obj = new_instance(elem_klass, NULL, &obj_size, /*deoptimize_on_exception=*/true);
 117         dec_sp(2);
 118 
 119         AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
 120         assert(alloc->maybe_set_complete(&_gvn), "");
 121         alloc->initialization()->set_complete_with_arraycopy();
 122 
 123         // This membar keeps this access to an unknown flattened array
 124         // correctly ordered with other unknown and known flattened
 125         // array accesses.
 126         insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::VALUES));
 127 
 128         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 129         // Unknown value type might contain reference fields
 130         if (!bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, BarrierSetC2::Parsing)) {
      [... lines 131-197 elided ...]
 198 }
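
For readers not fluent in IdealKit, the following standalone sketch models the two-way branch that array_load emits when the array's layout is only known at run time. This is not HotSpot code: Array, Value, and model_array_load are hypothetical stand-ins, and the boolean flattened stands in for the _lh_array_tag_vt_value layout tag test.

    #include <cstdint>

    struct Value { int32_t x; int32_t y; };   // stand-in for a value type payload

    struct Array {                            // stand-in for the two array layouts
      bool   flattened;                       // models the _lh_array_tag_vt_value tag test
      Value* oops[8];                         // non-flattened: elements are references
      Value  flat[8];                         // flattened: payloads stored inline
    };

    // Models the runtime two-way branch emitted by Parse::array_load.
    Value* model_array_load(Array* ary, int idx, Value* buf) {
      if (!ary->flattened) {
        return ary->oops[idx];                // models access_load_at on the oop slice
      } else {
        *buf = ary->flat[idx];                // models make_from_flattened + allocate
        return buf;
      }
    }
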
 199 
 200 
 201 //--------------------------------array_store----------------------------------
 202 void Parse::array_store(BasicType bt) {
 203   const Type* elemtype = Type::TOP;
 204   Node* adr = array_addressing(bt, type2size[bt], &elemtype);
 205   if (stopped())  return;     // guaranteed null or range check
 206   Node* cast_val = NULL;
 207   if (bt == T_OBJECT) {
 208     cast_val = array_store_check();
 209     if (stopped()) return;
 210   }
 211   Node* val = pop_node(bt); // Value to store
 212   Node* idx = pop();        // Index in the array
 213   Node* ary = pop();        // The array itself
 214 
 215   const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
 216   if (bt == T_OBJECT) {
 217     const TypeOopPtr* elemptr = elemtype->make_oopptr();
 218     const Type* val_t = _gvn.type(cast_val);
 219     if (elemtype->isa_valuetype() != NULL) {
 220       C->set_flattened_accesses();
 221       // Store to flattened value type array
 222       if (!cast_val->is_ValueType()) {
 223         inc_sp(3);
 224         cast_val = null_check(cast_val);
 225         if (stopped()) return;
 226         dec_sp(3);
 227         cast_val = ValueTypeNode::make_from_oop(this, cast_val, elemtype->value_klass());
 228       }
 229       cast_val->as_ValueType()->store_flattened(this, ary, adr);
 230       return;
 231     } else if (elemptr->is_valuetypeptr() && !elemptr->maybe_null()) {
 232       // Store to non-flattened but flattenable value type array (elements can never be null)
 233       if (!cast_val->is_ValueType()) {
 234         inc_sp(3);
 235         cast_val = null_check(cast_val);
 236         if (stopped()) return;
 237         dec_sp(3);
 238       }
 239     } else if (elemptr->can_be_value_type() && !ary_t->klass_is_exact() &&
 240                (cast_val->is_ValueType() || val_t == TypePtr::NULL_PTR || val_t->is_oopptr()->can_be_value_type())) {
 241       // Cannot statically determine if array is flattened or null-free, emit runtime checks
 242       ciValueKlass* vk = NULL;
 243       // Try to determine the value klass
 244       if (cast_val->is_ValueType()) {
 245         vk = val_t->value_klass();
 246       } else if (elemptr->is_valuetypeptr()) {
 247         vk = elemptr->value_klass();
 248       }
 249       if (!ary_t->is_not_flat() && (vk == NULL || vk->flatten_array())) {
 250         // Array might be flattened
 251         assert(ValueArrayFlatten && !ary_t->is_not_null_free(), "a null-ok array can't be flattened");
 252         IdealKit ideal(this);
 253         Node* kls = load_object_klass(ary);
 254         Node* layout_val = load_lh_array_tag(kls);
 255         ideal.if_then(layout_val, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value));
 256         {
 257           // non-flattened
 258           sync_kit(ideal);
 259           gen_value_array_null_guard(ary, cast_val, 3);
 260           const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
 261           elemtype = ary_t->elem()->make_oopptr();
 262           access_store_at(ary, adr, adr_type, cast_val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY, false, false);
 263           ideal.sync_kit(this);
 264         }
 265         ideal.else_();
 266         {
 267           // flattened
 268           if (!cast_val->is_ValueType() && TypePtr::NULL_PTR->higher_equal(val_t)) {
 269             // Add null check
 270             sync_kit(ideal);
 271             Node* null_ctl = top();
 272             cast_val = null_check_oop(cast_val, &null_ctl);
 273             if (null_ctl != top()) {
 274               PreserveJVMState pjvms(this);
 275               inc_sp(3);
 276               set_control(null_ctl);
 277               uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
 278               dec_sp(3);
 279             }
 280             ideal.sync_kit(this);
 281           }
 282           if (vk != NULL && !stopped()) {
 283             // Element type is known, cast and store to flattened representation
 284             sync_kit(ideal);
 285             assert(vk->flatten_array(), "must be flattenable");
 286             assert(elemptr->maybe_null(), "must be nullable");
 287             ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* never_null */ true);
 288             const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
 289             ary = _gvn.transform(new CheckCastPPNode(control(), ary, arytype));
 290             adr = array_element_address(ary, idx, T_OBJECT, arytype->size(), control());
 291             if (!cast_val->is_ValueType()) {
 292               assert(!gvn().type(cast_val)->maybe_null(), "value type array elements should never be null");
 293               cast_val = ValueTypeNode::make_from_oop(this, cast_val, vk);
 294             }
 295             cast_val->as_ValueType()->store_flattened(this, ary, adr);
 296             ideal.sync_kit(this);
 297           } else if (!ideal.ctrl()->is_top()) {
 298             // Element type is unknown, emit runtime call

 299             sync_kit(ideal);
 300 
 301             // This membar keeps this access to an unknown flattened
 302             // array correctly ordered with other unknown and known
 303             // flattened array accesses.
 304             insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::VALUES));
 305             ideal.sync_kit(this);
 306 
 307             ideal.make_leaf_call(OptoRuntime::store_unknown_value_Type(),
 308                                  CAST_FROM_FN_PTR(address, OptoRuntime::store_unknown_value),
 309                                  "store_unknown_value",
 310                                  cast_val, ary, idx);
 311 
 312             sync_kit(ideal);
 313             // Same as MemBarCPUOrder above: keep this unknown
 314             // flattened array access correctly ordered with other
 315             // flattened array accesses
 316             insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::VALUES));
 317             ideal.sync_kit(this);
 318 
 319           }
 320         }
 321         ideal.end_if();
 322         sync_kit(ideal);
 323         return;
 324       } else if (!ary_t->is_not_null_free()) {
 325         // Array is never flattened
 326         gen_value_array_null_guard(ary, cast_val, 3);
 327       }
 328     }
 329   }
 330 
 331   if (elemtype == TypeInt::BOOL) {
 332     bt = T_BOOLEAN;
 333   } else if (bt == T_OBJECT) {
 334     elemtype = ary_t->elem()->make_oopptr();
 335   }
 336 
 337   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
 338 
 339   access_store_at(ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
 340 }
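
The store side can be modeled the same way: once the layout tag says the array is flattened, a null store is rejected (flattened arrays are null-free), a known element klass is stored inline, and an unknown one goes through a runtime helper. Again a hypothetical standalone sketch, with the uncommon trap modeled as an exception and store_unknown_value_model standing in for the OptoRuntime::store_unknown_value leaf call.

    #include <stdexcept>

    struct Value { int x, y; };
    struct FlatArray { Value elems[8]; };

    // Stand-in for the OptoRuntime::store_unknown_value leaf call.
    static void store_unknown_value_model(FlatArray* ary, int idx, const Value* val) {
      ary->elems[idx] = *val;
    }

    void model_array_store(FlatArray* ary, int idx, const Value* val, bool klass_known) {
      if (val == nullptr) {
        // models null_check_oop + uncommon_trap(Reason_null_check, Action_none)
        throw std::runtime_error("null store to a flattened (null-free) value array");
      }
      if (klass_known) {
        ary->elems[idx] = *val;                  // models ValueTypeNode::store_flattened
      } else {
        store_unknown_value_model(ary, idx, val); // models the leaf runtime call
      }
    }
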
 341 
 342 
 343 //------------------------------array_addressing-------------------------------
 344 // Pull array and index from the stack.  Compute pointer-to-element.
 345 Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
 346   Node *idx   = peek(0+vals);   // Get from stack without popping


 418       set_control(_gvn.transform(new IfFalseNode(rc)));
 419       if (C->allow_range_check_smearing()) {
 420         // Do not use builtin_throw, since range checks are sometimes
 421         // made more stringent by an optimistic transformation.
 422         // This creates "tentative" range checks at this point,
 423         // which are not guaranteed to throw exceptions.
 424         // See IfNode::Ideal, is_range_check, adjust_check.
 425         uncommon_trap(Deoptimization::Reason_range_check,
 426                       Deoptimization::Action_make_not_entrant,
 427                       NULL, "range_check");
 428       } else {
 429         // If we have already recompiled with the range-check-widening
 430         // heroic optimization turned off, then we must really be throwing
 431         // range check exceptions.
 432         builtin_throw(Deoptimization::Reason_range_check, idx);
 433       }
 434     }
 435   }
 436   // Check for always knowing you are throwing a range-check exception
 437   if (stopped())  return top();
 438 
 439   // Speculate on the array not being null-free
 440   if (!arytype->is_not_null_free() && arytype->speculative() != NULL && arytype->speculative()->isa_aryptr() != NULL &&
 441       arytype->speculative()->is_aryptr()->is_not_null_free() &&
 442       !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
 443     Node* tst = gen_null_free_array_check(ary);
 444     {
 445       BuildCutout unless(this, tst, PROB_ALWAYS);
 446       uncommon_trap(Deoptimization::Reason_speculate_class_check,
 447                     Deoptimization::Action_maybe_recompile);
 448     }
 449     Node* cast = new CheckCastPPNode(control(), ary, arytype->cast_to_not_null_free());
 450     replace_in_map(ary, _gvn.transform(cast));
 451   }
 452 
 453   // Make array address computation control dependent to prevent it
 454   // from floating above the range check during loop optimizations.
 455   Node* ptr = array_element_address(ary, idx, type, sizetype, control());
 456 
 457   if (result2 != NULL)  *result2 = elemtype;
 458 
 459   assert(ptr != top(), "top should go hand-in-hand with stopped");
 460 
 461   return ptr;
 462 }
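
Two idioms from array_addressing are worth spelling out. First, the emitted range check boils down to a single unsigned comparison, which rejects negative and too-large indices in one test; a one-line C++ equivalent:

    inline bool in_bounds(int idx, int len) {
      // (unsigned)idx < (unsigned)len covers idx < 0 and idx >= len at once,
      // like the CmpU-based range check the parser emits.
      return static_cast<unsigned>(idx) < static_cast<unsigned>(len);
    }

Second, the new not-null-free speculation follows the usual speculate-and-strengthen shape: guard once on the speculated property, trap (and possibly recompile) if the guard fails, and continue under the strengthened type otherwise. A sketch under those assumptions, with deoptimize_model and ArrayOopModel as hypothetical stand-ins:

    #include <cstdlib>

    struct ArrayOopModel { bool null_free; };   // hypothetical

    // Stand-in for uncommon_trap(Reason_speculate_class_check, Action_maybe_recompile).
    [[noreturn]] static void deoptimize_model() { std::abort(); }

    void speculate_not_null_free(ArrayOopModel* ary) {
      if (ary->null_free) {                     // models gen_null_free_array_check
        deoptimize_model();                     // speculation failed: trap and recompile
      }
      // From here on the type can be strengthened to "not null-free",
      // like the CheckCastPP + replace_in_map above.
    }
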
 463 
 464 
 465 // returns IfNode
 466 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
 467   Node   *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
 468   Node   *tst = _gvn.transform(new BoolNode(cmp, mask));
 469   IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
 470   return iff;
 471 }
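
Finally, a note on the paired MemBarCPUOrder barriers around the unknown-flattened accesses in both methods: MemBarCPUOrder emits no machine code; it pins the access in C2's ideal memory graph so it cannot be reordered against other accesses on the TypeAryPtr::VALUES slice. As a loose analogy only (a compiler-level fence, not the same mechanism), a standalone C++ sketch:

    #include <atomic>

    struct Flat { int payload; };
    Flat g_element;                             // hypothetical shared flattened element

    // Loose analogy: compiler-only fences bracket an opaque store so neighboring
    // accesses cannot be reordered across it, much as the paired MemBarCPUOrder
    // nodes pin the unknown flattened-array access in the ideal memory graph.
    void ordered_unknown_store(const Flat& v) {
      std::atomic_signal_fence(std::memory_order_seq_cst);
      g_element = v;                            // models the store_unknown_value call
      std::atomic_signal_fence(std::memory_order_seq_cst);
    }
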

