src/share/vm/opto/parse3.cpp

rev 5661 : 8024921: PPC64 (part 113): Extend Load and Store nodes to know about memory ordering.


 211   bool must_assert_null = false;
 212 
 213   if( bt == T_OBJECT ) {
 214     if (!field->type()->is_loaded()) {
 215       type = TypeInstPtr::BOTTOM;
 216       must_assert_null = true;
 217     } else if (field->is_constant() && field->is_static()) {
 218       // This can happen if the constant oop is non-perm.
 219       ciObject* con = field->constant_value().as_object();
 220       // Do not "join" in the previous type; it doesn't add value,
 221       // and may yield a vacuous result if the field is of interface type.
 222       type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
 223       assert(type != NULL, "field singleton type must be consistent");
 224     } else {
 225       type = TypeOopPtr::make_from_klass(field_klass->as_klass());
 226     }
 227   } else {
 228     type = Type::get_const_basic_type(bt);
 229   }
 230   // Build the load.
 231   Node* ld = make_load(NULL, adr, type, bt, adr_type, is_vol);
 232 
 233   // Adjust Java stack
 234   if (type2size[bt] == 1)
 235     push(ld);
 236   else
 237     push_pair(ld);
 238 
 239   if (must_assert_null) {
 240     // Do not take a trap here.  It's possible that the program
 241     // will never load the field's class, and will happily see
 242     // null values in this field forever.  Don't stumble into a
 243     // trap for such a program, or we might get a long series
 244     // of useless recompilations.  (Or, we might load a class
 245     // which should not be loaded.)  If we ever see a non-null
 246     // value, we will then trap and recompile.  (The trap will
 247     // not need to mention the class index, since the class will
 248     // already have been loaded if we ever see a non-null value.)
 249     // uncommon_trap(iter().get_field_signature_index());
 250 #ifndef PRODUCT
 251     if (PrintOpto && (Verbose || WizardMode)) {


 271   }
 272 }
 273 
 274 void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
 275   bool is_vol = field->is_volatile();
 276   // If reference is volatile, prevent following memory ops from
 277   // floating down past the volatile write.  Also prevents commoning
 278   // another volatile read.
 279   if (is_vol)  insert_mem_bar(Op_MemBarRelease);
 280 
 281   // Compute address and memory type.
 282   int offset = field->offset_in_bytes();
 283   const TypePtr* adr_type = C->alias_type(field)->adr_type();
 284   Node* adr = basic_plus_adr(obj, obj, offset);
 285   BasicType bt = field->layout_type();
 286   // Value to be stored
 287   Node* val = type2size[bt] == 1 ? pop() : pop_pair();
 288   // Round doubles before storing
 289   if (bt == T_DOUBLE)  val = dstore_rounding(val);
 290
 291   // Store the value.
 292   Node* store;
 293   if (bt == T_OBJECT) {
 294     const TypeOopPtr* field_type;
 295     if (!field->type()->is_loaded()) {
 296       field_type = TypeInstPtr::BOTTOM;
 297     } else {
 298       field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
 299     }
 300     store = store_oop_to_object( control(), obj, adr, adr_type, val, field_type, bt);
 301   } else {
 302     store = store_to_memory( control(), adr, val, bt, adr_type, is_vol );
 303   }
 304 
 305   // If reference is volatile, prevent following volatile ops from
 306   // floating up before the volatile write.
 307   if (is_vol) {
 308     insert_mem_bar(Op_MemBarVolatile); // Use fat membar
 309   }
 310 
 311   // If the field is final, the rules of Java say we are in <init> or <clinit>.
 312   // Note the presence of writes to final non-static fields, so that we
 313   // can insert a memory barrier later on to keep the writes from floating
 314   // out of the constructor.
 315   // Any method can write a @Stable field; insert memory barriers after those also.
 316   if (is_field && (field->is_final() || field->is_stable())) {
 317     set_wrote_final(true);
 318     // Preserve allocation ptr to create precedent edge to it in membar
 319     // generated on exit from constructor.
 320     if (C->eliminate_boxing() &&
 321         adr_type->isa_oopptr() && adr_type->is_oopptr()->is_ptr_to_boxed_value() &&
 322         AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) {


 397   push(obj);
 398 }
 399 
 400 // Expand simple expressions like new int[3][5] and new Object[2][nonConLen].
 401 // Also handle the degenerate 1-dimensional case of anewarray.
 402 Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs) {
 403   Node* length = lengths[0];
 404   assert(length != NULL, "");
 405   Node* array = new_array(makecon(TypeKlassPtr::make(array_klass)), length, nargs);
 406   if (ndimensions > 1) {
 407     jint length_con = find_int_con(length, -1);
 408     guarantee(length_con >= 0, "non-constant multianewarray");
 409     ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
 410     const TypePtr* adr_type = TypeAryPtr::OOPS;
 411     const TypeOopPtr*    elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
 412     const intptr_t header   = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
 413     for (jint i = 0; i < length_con; i++) {
 414       Node*    elem   = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
 415       intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
 416       Node*    eaddr  = basic_plus_adr(array, offset);
 417       store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT);
 418     }
 419   }
 420   return array;
 421 }
 422 
 423 void Parse::do_multianewarray() {
 424   int ndimensions = iter().get_dimensions();
 425 
 426   // the m-dimensional array
 427   bool will_link;
 428   ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
 429   assert(will_link, "multianewarray: typeflow responsibility");
 430 
 431   // Note:  Array classes are always initialized; no is_initialized check.
 432 
 433   kill_dead_locals();
 434 
 435   // get the lengths from the stack (first dimension is on top)
 436   Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
 437   length[ndimensions] = NULL;  // terminating null for make_runtime_call


 486 
 487   if (fun != NULL) {
 488     c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
 489                           OptoRuntime::multianewarray_Type(ndimensions),
 490                           fun, NULL, TypeRawPtr::BOTTOM,
 491                           makecon(TypeKlassPtr::make(array_klass)),
 492                           length[0], length[1], length[2],
 493                           (ndimensions > 2) ? length[3] : NULL,
 494                           (ndimensions > 3) ? length[4] : NULL);
 495   } else {
 496     // Create a java array for dimension sizes
 497     Node* dims = NULL;
 498     { PreserveReexecuteState preexecs(this);
 499       inc_sp(ndimensions);
 500       Node* dims_array_klass = makecon(TypeKlassPtr::make(ciArrayKlass::make(ciType::make(T_INT))));
 501       dims = new_array(dims_array_klass, intcon(ndimensions), 0);
 502 
 503       // Fill it in with values
 504       for (j = 0; j < ndimensions; j++) {
 505         Node *dims_elem = array_element_address(dims, intcon(j), T_INT);
 506         store_to_memory(control(), dims_elem, length[j], T_INT, TypeAryPtr::INTS);
 507       }
 508     }
 509 
 510     c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
 511                           OptoRuntime::multianewarrayN_Type(),
 512                           OptoRuntime::multianewarrayN_Java(), NULL, TypeRawPtr::BOTTOM,
 513                           makecon(TypeKlassPtr::make(array_klass)),
 514                           dims);
 515   }
 516   make_slow_call_ex(c, env()->Throwable_klass(), false);
 517 
 518   Node* res = _gvn.transform(new (C) ProjNode(c, TypeFunc::Parms));
 519 
 520   const Type* type = TypeOopPtr::make_from_klass_raw(array_klass);
 521 
 522   // Improve the type:  We know it's not null, exact, and of a given length.
 523   type = type->is_ptr()->cast_to_ptr_type(TypePtr::NotNull);
 524   type = type->is_aryptr()->cast_to_exactness(true);
 525 
 526   const TypeInt* ltype = _gvn.find_int_type(length[0]);


 211   bool must_assert_null = false;
 212 
 213   if( bt == T_OBJECT ) {
 214     if (!field->type()->is_loaded()) {
 215       type = TypeInstPtr::BOTTOM;
 216       must_assert_null = true;
 217     } else if (field->is_constant() && field->is_static()) {
 218       // This can happen if the constant oop is non-perm.
 219       ciObject* con = field->constant_value().as_object();
 220       // Do not "join" in the previous type; it doesn't add value,
 221       // and may yield a vacuous result if the field is of interface type.
 222       type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
 223       assert(type != NULL, "field singleton type must be consistent");
 224     } else {
 225       type = TypeOopPtr::make_from_klass(field_klass->as_klass());
 226     }
 227   } else {
 228     type = Type::get_const_basic_type(bt);
 229   }
 230   // Build the load.
 231   //
 232   LoadNode::Sem sem = is_vol ? LoadNode::acquire : LoadNode::unordered;
 233   Node* ld = make_load(NULL, adr, type, bt, adr_type, is_vol, sem);
 234 
 235   // Adjust Java stack
 236   if (type2size[bt] == 1)
 237     push(ld);
 238   else
 239     push_pair(ld);
 240 
 241   if (must_assert_null) {
 242     // Do not take a trap here.  It's possible that the program
 243     // will never load the field's class, and will happily see
 244     // null values in this field forever.  Don't stumble into a
 245     // trap for such a program, or we might get a long series
 246     // of useless recompilations.  (Or, we might load a class
 247     // which should not be loaded.)  If we ever see a non-null
 248     // value, we will then trap and recompile.  (The trap will
 249     // not need to mention the class index, since the class will
 250     // already have been loaded if we ever see a non-null value.)
 251     // uncommon_trap(iter().get_field_signature_index());
 252 #ifndef PRODUCT
 253     if (PrintOpto && (Verbose || WizardMode)) {


 273   }
 274 }
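
The reworked load above selects LoadNode::acquire for a volatile field read and LoadNode::unordered otherwise, and passes that choice into make_load, so a weakly ordered target such as PPC64 can emit an acquiring load only where the Java memory model requires one. As a rough illustration of the contract an acquiring load provides, here is a minimal, self-contained C++11 sketch using std::atomic (illustrative only, not HotSpot code):

// Illustrative sketch, not HotSpot code: an acquiring load pairs with a
// releasing store so that data written before the store is visible after
// the load.
#include <atomic>
#include <cassert>
#include <thread>

int payload = 0;                  // ordinary, non-volatile data
std::atomic<int> flag(0);         // stands in for a volatile Java field

void writer() {
  payload = 42;                              // plain store
  flag.store(1, std::memory_order_release);  // "volatile" write: releasing store
}

void reader() {
  // "Volatile" read: acquiring load; later memory accesses may not be
  // hoisted above it.
  while (flag.load(std::memory_order_acquire) == 0) { /* spin */ }
  assert(payload == 42);                     // guaranteed by the acquire/release pairing
}

int main() {
  std::thread t1(writer), t2(reader);
  t1.join();
  t2.join();
  return 0;
}

Carrying the ordering on the node itself presumably lets the PPC64 matcher emit the right instruction sequence rather than inferring it from the surrounding MemBar nodes alone.
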
 275 
 276 void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
 277   bool is_vol = field->is_volatile();
 278   // If reference is volatile, prevent following memory ops from
 279   // floating down past the volatile write.  Also prevents commoning
 280   // another volatile read.
 281   if (is_vol)  insert_mem_bar(Op_MemBarRelease);
 282 
 283   // Compute address and memory type.
 284   int offset = field->offset_in_bytes();
 285   const TypePtr* adr_type = C->alias_type(field)->adr_type();
 286   Node* adr = basic_plus_adr(obj, obj, offset);
 287   BasicType bt = field->layout_type();
 288   // Value to be stored
 289   Node* val = type2size[bt] == 1 ? pop() : pop_pair();
 290   // Round doubles before storing
 291   if (bt == T_DOUBLE)  val = dstore_rounding(val);
 292 
 293   // Conservatively release stores of object references.
 294   const StoreNode::Sem sem =
 295     is_vol ?
 296     // Volatile fields need releasing stores.
 297     StoreNode::release :
 298     // Non-volatile fields also need releasing stores if they hold an
 299     // object reference, because the object reference might point to
 300     // a freshly created object.
 301     StoreNode::release_if_reference(bt);
 302 
 303   // Store the value.
 304   Node* store;
 305   if (bt == T_OBJECT) {
 306     const TypeOopPtr* field_type;
 307     if (!field->type()->is_loaded()) {
 308       field_type = TypeInstPtr::BOTTOM;
 309     } else {
 310       field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
 311     }
 312     store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, sem);
 313   } else {
 314     store = store_to_memory(control(), adr, val, bt, adr_type, is_vol, sem);
 315   }
 316 
 317   // If reference is volatile, prevent following volatile ops from
 318   // floating up before the volatile write.
 319   if (is_vol) {
 320     insert_mem_bar(Op_MemBarVolatile); // Use fat membar
 321   }
 322 
 323   // If the field is final, the rules of Java say we are in <init> or <clinit>.
 324   // Note the presence of writes to final non-static fields, so that we
 325   // can insert a memory barrier later on to keep the writes from floating
 326   // out of the constructor.
 327   // Any method can write a @Stable field; insert memory barriers after those also.
 328   if (is_field && (field->is_final() || field->is_stable())) {
 329     set_wrote_final(true);
 330     // Preserve allocation ptr to create precedent edge to it in membar
 331     // generated on exit from constructor.
 332     if (C->eliminate_boxing() &&
 333         adr_type->isa_oopptr() && adr_type->is_oopptr()->is_ptr_to_boxed_value() &&
 334         AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) {


 409   push(obj);
 410 }
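
On the store side, a volatile field write keeps its MemBarRelease/MemBarVolatile bracketing and is additionally tagged StoreNode::release, while an ordinary field write gets StoreNode::release_if_reference(bt): as the comment explains, the stored reference might point to a freshly created object, and its initializing stores must not become visible after the reference that publishes it. A standalone C++ sketch of that publication concern (illustrative only, not HotSpot code; the acquire on the reader side stands in for whatever ordering the consumer uses):

// Illustrative sketch, not HotSpot code: the releasing store of the reference
// keeps the initializing stores of the new object from being reordered past
// the publication.
#include <atomic>
#include <cassert>
#include <thread>

struct Point { int x; int y; };

std::atomic<Point*> field(nullptr);   // models an ordinary object-reference field

void construct_and_store() {
  Point* p = new Point();
  p->x = 1;                                      // plain initializing stores
  p->y = 2;
  field.store(p, std::memory_order_release);     // releasing store of the reference
}

void observer() {
  Point* p;
  while ((p = field.load(std::memory_order_acquire)) == nullptr) { /* spin */ }
  assert(p->x == 1 && p->y == 2);                // never observes a half-built Point
  delete p;
}

int main() {
  std::thread a(construct_and_store), b(observer);
  a.join();
  b.join();
  return 0;
}

The final/@Stable handling in the hunk above serves the same goal at a coarser granularity: the parser only records that such a field was written, and a single barrier is emitted later, on exit from the constructor, to keep those writes from floating out of it.
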
 411 
 412 // Expand simple expressions like new int[3][5] and new Object[2][nonConLen].
 413 // Also handle the degenerate 1-dimensional case of anewarray.
 414 Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs) {
 415   Node* length = lengths[0];
 416   assert(length != NULL, "");
 417   Node* array = new_array(makecon(TypeKlassPtr::make(array_klass)), length, nargs);
 418   if (ndimensions > 1) {
 419     jint length_con = find_int_con(length, -1);
 420     guarantee(length_con >= 0, "non-constant multianewarray");
 421     ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
 422     const TypePtr* adr_type = TypeAryPtr::OOPS;
 423     const TypeOopPtr*    elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
 424     const intptr_t header   = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
 425     for (jint i = 0; i < length_con; i++) {
 426       Node*    elem   = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
 427       intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
 428       Node*    eaddr  = basic_plus_adr(array, offset);
 429       store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT, StoreNode::unordered);
 430     }
 431   }
 432   return array;
 433 }
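
Both the element stores in expand_multianewarray above and the dimension-array stores in do_multianewarray below are tagged StoreNode::unordered. Presumably that is sufficient because the arrays being filled are freshly allocated and have not yet escaped to another thread, so a single releasing store (or barrier) at the eventual point of publication orders all of the plain element stores at once. A small standalone sketch of that reasoning (illustrative only, not HotSpot code):

// Illustrative sketch, not HotSpot code: plain stores into a not-yet-published
// array are ordered as a group by the single releasing store that publishes it.
#include <atomic>
#include <cassert>
#include <thread>

const int N = 8;
std::atomic<int*> published(nullptr);

void build_and_publish() {
  int* a = new int[N];
  for (int i = 0; i < N; i++) {
    a[i] = i;                                    // unordered initializing stores
  }
  published.store(a, std::memory_order_release); // one release covers them all
}

void consumer() {
  int* a;
  while ((a = published.load(std::memory_order_acquire)) == nullptr) { /* spin */ }
  for (int i = 0; i < N; i++) {
    assert(a[i] == i);
  }
  delete[] a;
}

int main() {
  std::thread t1(build_and_publish), t2(consumer);
  t1.join();
  t2.join();
  return 0;
}
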
 434 
 435 void Parse::do_multianewarray() {
 436   int ndimensions = iter().get_dimensions();
 437 
 438   // the m-dimensional array
 439   bool will_link;
 440   ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
 441   assert(will_link, "multianewarray: typeflow responsibility");
 442 
 443   // Note:  Array classes are always initialized; no is_initialized check.
 444 
 445   kill_dead_locals();
 446 
 447   // get the lengths from the stack (first dimension is on top)
 448   Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
 449   length[ndimensions] = NULL;  // terminating null for make_runtime_call


 498 
 499   if (fun != NULL) {
 500     c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
 501                           OptoRuntime::multianewarray_Type(ndimensions),
 502                           fun, NULL, TypeRawPtr::BOTTOM,
 503                           makecon(TypeKlassPtr::make(array_klass)),
 504                           length[0], length[1], length[2],
 505                           (ndimensions > 2) ? length[3] : NULL,
 506                           (ndimensions > 3) ? length[4] : NULL);
 507   } else {
 508     // Create a java array for dimension sizes
 509     Node* dims = NULL;
 510     { PreserveReexecuteState preexecs(this);
 511       inc_sp(ndimensions);
 512       Node* dims_array_klass = makecon(TypeKlassPtr::make(ciArrayKlass::make(ciType::make(T_INT))));
 513       dims = new_array(dims_array_klass, intcon(ndimensions), 0);
 514 
 515       // Fill it in with values
 516       for (j = 0; j < ndimensions; j++) {
 517         Node *dims_elem = array_element_address(dims, intcon(j), T_INT);
 518         store_to_memory(control(), dims_elem, length[j], T_INT, TypeAryPtr::INTS, false, StoreNode::unordered);
 519       }
 520     }
 521 
 522     c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
 523                           OptoRuntime::multianewarrayN_Type(),
 524                           OptoRuntime::multianewarrayN_Java(), NULL, TypeRawPtr::BOTTOM,
 525                           makecon(TypeKlassPtr::make(array_klass)),
 526                           dims);
 527   }
 528   make_slow_call_ex(c, env()->Throwable_klass(), false);
 529 
 530   Node* res = _gvn.transform(new (C) ProjNode(c, TypeFunc::Parms));
 531 
 532   const Type* type = TypeOopPtr::make_from_klass_raw(array_klass);
 533 
 534   // Improve the type:  We know it's not null, exact, and of a given length.
 535   type = type->is_ptr()->cast_to_ptr_type(TypePtr::NotNull);
 536   type = type->is_aryptr()->cast_to_exactness(true);
 537 
 538   const TypeInt* ltype = _gvn.find_int_type(length[0]);