/*
 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_parse3.cpp.incl"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================
bool Parse::static_field_ok_in_clinit(ciField *field, ciMethod *method) {
  // Could be the field_holder's <clinit> method, or <clinit> for a subklass.
  // Better to check now than to deoptimize as soon as the code executes.
  assert( field->is_static(), "Only check if field is static");
  // is_being_initialized() is too generous.  It allows access to statics
  // by threads that are not running the <clinit> before the <clinit> finishes.
  // return field->holder()->is_being_initialized();

  // The following restriction is correct but conservative.
  // It would also be desirable to allow compilation of methods called from
  // <clinit>, but the generated code would need to be made safe for execution
  // by other threads, or the transition from interpreted to compiled code
  // would need to be guarded.
  ciInstanceKlass *field_holder = field->holder();

  bool access_OK = false;
  if (method->holder()->is_subclass_of(field_holder)) {
    if (method->is_static()) {
      if (method->name() == ciSymbol::class_initializer_name()) {
        // OK to access static fields inside the initializer.
        access_OK = true;
      }
    } else {
      if (method->name() == ciSymbol::object_initializer_name()) {
        // It's also OK to access static fields inside a constructor,
        // because any thread calling the constructor must first have
        // synchronized on the class by executing a '_new' bytecode.
        access_OK = true;
      }
    }
  }

  return access_OK;
}
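
// Illustrative sketch (not part of the original source; class and field
// names are hypothetical): the kind of Java code whose static-field
// accesses the check above admits.
//
//   class A {
//     static int COUNT;
//     static { COUNT = 1; }    // <clinit> of A (or a subclass): access OK
//     A()    { COUNT++; }      // constructor: OK, since '_new' already
//   }                          // synchronized on the class
//
// A method of an unrelated class touching A.COUNT before A's <clinit>
// finishes does not pass this check; it takes the uncommon trap emitted in
// do_field_access() below instead.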


void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "getfield: typeflow responsibility");

  ciInstanceKlass* field_holder = field->holder();

  if (is_field == field->is_static()) {
    // Interpreter will throw java_lang_IncompatibleClassChangeError.
    // Check this before allowing <clinit> methods to access static fields.
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  if (!is_field && !field_holder->is_initialized()) {
    if (!static_field_ok_in_clinit(field, method())) {
      uncommon_trap(Deoptimization::Reason_uninitialized,
                    Deoptimization::Action_reinterpret,
                    NULL, "!static_field_ok_in_clinit");
      return;
    }
  }

  assert(field->will_link(method()->holder(), bc()), "getfield: typeflow responsibility");

  // Note:  We do not check for an unloaded field type here any more.

  // Generate code for the object pointer.
  Node* obj;
  if (is_field) {
    // For a put, the value to be stored sits above the receiver on the
    // operand stack, so the receiver is at a depth equal to the value's
    // size in stack slots; for a get, the receiver is on top.
    int obj_depth = is_get ? 0 : field->type()->size();
    obj = do_null_check(peek(obj_depth), T_OBJECT);
    // Did the compile-time null check prove the receiver is always null?
    if (stopped())  return;

    const TypeInstPtr *tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
    assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");

    if (is_get) {
      --_sp;  // pop receiver before getting
      do_get_xxx(tjp, obj, field, is_field);
    } else {
      do_put_xxx(tjp, obj, field, is_field);
      --_sp;  // pop receiver after putting
    }
  } else {
    const TypeKlassPtr* tkp = TypeKlassPtr::make(field_holder);
    obj = _gvn.makecon(tkp);
    if (is_get) {
      do_get_xxx(tkp, obj, field, is_field);
    } else {
      do_put_xxx(tkp, obj, field, is_field);
    }
  }
}
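
// Illustrative note (an assumption, not from the original source): the
// is_field/is_static mismatch trapped above arises when a class evolves
// separately from its clients, e.g.
//
//   class Holder { int x; }          // version the client was compiled against
//   class Holder { static int x; }   // version actually loaded at run time
//
// The client's getfield bytecode then resolves to a static field, and the
// JVM spec requires IncompatibleClassChangeError; we leave throwing it to
// the interpreter.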


void Parse::do_get_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field) {
  // Does this field have a constant value?  If so, just push the value.
  if (field->is_constant()) {
    if (field->is_static()) {
      // static final field
      if (push_constant(field->constant_value()))
        return;
    }
    else {
      // final non-static field of a trusted class (classes in
      // {java,sun}.dyn).
      if (obj->is_Con()) {
        const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr();
        ciObject* constant_oop = oop_ptr->const_oop();
        ciConstant constant = field->constant_value_of(constant_oop);

        if (push_constant(constant, true))
          return;
      }
    }
  }

  ciType* field_klass = field->type();
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node *adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();

  // Build the resultant type of the load.
  const Type *type;

  bool must_assert_null = false;

  if( bt == T_OBJECT ) {
    if (!field->type()->is_loaded()) {
      type = TypeInstPtr::BOTTOM;
      must_assert_null = true;
    } else if (field->is_constant() && field->is_static()) {
      // This can happen if the constant oop is non-perm.
      ciObject* con = field->constant_value().as_object();
      // Do not "join" in the previous type; it doesn't add value,
      // and may yield a vacuous result if the field is of interface type.
      type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
      assert(type != NULL, "field singleton type must be consistent");
    } else {
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    }
  } else {
    type = Type::get_const_basic_type(bt);
  }
  // Build the load.
  Node* ld = make_load(NULL, adr, type, bt, adr_type, is_vol);

  // Adjust the Java stack.
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here.  It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever.  Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations.  (Or, we might load a class
    // which should not be loaded.)  If we ever see a non-null
    // value, we will then trap and recompile.  (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
#endif
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field->type()));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    do_null_assert(peek(), T_OBJECT);
    set_bci(iter().cur_bci()); // put it back
  }

  // If the field is volatile, prevent following memory ops from
  // floating up past the volatile read.  Also prevents commoning
  // another volatile read.
  if (field->is_volatile()) {
    // The memory barrier includes a bogus read of the loaded value, to
    // force the load to happen BEFORE the membar.
    insert_mem_bar(Op_MemBarAcquire, ld);
  }
}
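
// Illustrative sketch (an assumption, not from the original source): the
// acquire barrier above implements the Java Memory Model ordering for a
// volatile read, e.g.
//
//   class Flag {
//     volatile boolean ready;
//     int data;
//     int reader() { return ready ? data : -1; }  // load of 'data' must not
//   }                                             // float above 'ready'
//
// MemBarAcquire keeps the subsequent load of 'data' from being scheduled
// before the volatile load of 'ready'.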
 222 
 223 void Parse::do_put_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field) {
 224   bool is_vol = field->is_volatile();
 225   // If reference is volatile, prevent following memory ops from
 226   // floating down past the volatile write.  Also prevents commoning
 227   // another volatile read.
 228   if (is_vol)  insert_mem_bar(Op_MemBarRelease);
 229 
 230   // Compute address and memory type.
 231   int offset = field->offset_in_bytes();
 232   const TypePtr* adr_type = C->alias_type(field)->adr_type();
 233   Node* adr = basic_plus_adr(obj, obj, offset);
 234   BasicType bt = field->layout_type();
 235   // Value to be stored
 236   Node* val = type2size[bt] == 1 ? pop() : pop_pair();
 237   // Round doubles before storing
 238   if (bt == T_DOUBLE)  val = dstore_rounding(val);
 239 
 240   // Store the value.
 241   Node* store;
 242   if (bt == T_OBJECT) {
 243     const TypeOopPtr* field_type;
 244     if (!field->type()->is_loaded()) {
 245       field_type = TypeInstPtr::BOTTOM;
 246     } else {
 247       field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
 248     }
 249     store = store_oop_to_object( control(), obj, adr, adr_type, val, field_type, bt);
 250   } else {
 251     store = store_to_memory( control(), adr, val, bt, adr_type, is_vol );
 252   }
 253 
 254   // If reference is volatile, prevent following volatiles ops from
 255   // floating up before the volatile write.
 256   if (is_vol) {
 257     // First place the specific membar for THIS volatile index. This first
 258     // membar is dependent on the store, keeping any other membars generated
 259     // below from floating up past the store.
 260     int adr_idx = C->get_alias_index(adr_type);
 261     insert_mem_bar_volatile(Op_MemBarVolatile, adr_idx, store);
 262 
 263     // Now place a membar for AliasIdxBot for the unknown yet-to-be-parsed
 264     // volatile alias indices. Skip this if the membar is redundant.
 265     if (adr_idx != Compile::AliasIdxBot) {
 266       insert_mem_bar_volatile(Op_MemBarVolatile, Compile::AliasIdxBot, store);
 267     }
 268 
 269     // Finally, place alias-index-specific membars for each volatile index
 270     // that isn't the adr_idx membar. Typically there's only 1 or 2.
 271     for( int i = Compile::AliasIdxRaw; i < C->num_alias_types(); i++ ) {
 272       if (i != adr_idx && C->alias_type(i)->is_volatile()) {
 273         insert_mem_bar_volatile(Op_MemBarVolatile, i, store);
 274       }
 275     }
 276   }
 277 
 278   // If the field is final, the rules of Java say we are in <init> or <clinit>.
 279   // Note the presence of writes to final non-static fields, so that we
 280   // can insert a memory barrier later on to keep the writes from floating
 281   // out of the constructor.
 282   if (is_field && field->is_final()) {
 283     set_wrote_final(true);
 284   }
 285 }
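
// Illustrative sketch (an assumption, not from the original source): the
// release/volatile barrier pair above gives volatile stores their Java
// Memory Model semantics, e.g.
//
//   class Flag {
//     int data;
//     volatile boolean ready;
//     void writer() { data = 42; ready = true; }
//   }
//
// MemBarRelease keeps the store to 'data' from sinking below the store to
// 'ready'; the trailing MemBarVolatile keeps later volatile accesses from
// floating above it, so a reader that sees ready == true also sees data == 42.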


bool Parse::push_constant(ciConstant constant, bool require_constant) {
  switch (constant.basic_type()) {
  case T_BOOLEAN:  push( intcon(constant.as_boolean()) ); break;
  case T_INT:      push( intcon(constant.as_int())     ); break;
  case T_CHAR:     push( intcon(constant.as_char())    ); break;
  case T_BYTE:     push( intcon(constant.as_byte())    ); break;
  case T_SHORT:    push( intcon(constant.as_short())   ); break;
  case T_FLOAT:    push( makecon(TypeF::make(constant.as_float())) );  break;
  case T_DOUBLE:   push_pair( makecon(TypeD::make(constant.as_double())) );  break;
  case T_LONG:     push_pair( longcon(constant.as_long()) ); break;
  case T_ARRAY:
  case T_OBJECT: {
    // cases:
    //   can_be_constant    = (oop not scavengable || ScavengeRootsInCode != 0)
    //   should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
    // An oop is not scavengable if it is in the perm gen.
    ciObject* oop_constant = constant.as_object();
    if (oop_constant->is_null_object()) {
      push( zerocon(T_OBJECT) );
      break;
    } else if (require_constant || oop_constant->should_be_constant()) {
      push( makecon(TypeOopPtr::make_from_constant(oop_constant, require_constant)) );
      break;
    } else {
      // we cannot inline the oop, but we can use it later to narrow a type
      return false;
    }
  }
  case T_ILLEGAL: {
    // Invalid ciConstant returned due to OutOfMemoryError in the CI
    assert(C->env()->failing(), "otherwise should not see this");
    // These always occur because of object types; we are going to
    // bail out anyway, so make the stack depths match up
    push( zerocon(T_OBJECT) );
    return false;
  }
  default:
    ShouldNotReachHere();
    return false;
  }

  // success
  return true;
}
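
// Illustrative note (an assumption, not from the original source): a typical
// caller is the constant-folding path at the top of do_get_xxx() above.
// For a field like
//
//   static final int SIZE = 64;
//
// a getstatic of SIZE emits no load at all; push_constant() pushes
// intcon(64) directly onto the parser's JVM state.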


//=============================================================================
void Parse::do_anewarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Take an uncommon trap when the array's element class is not loaded:
  // we need the loaded class for the rest of the graph, but we must not
  // initialize the element class here (see the Java spec)!
  assert(will_link, "anewarray: typeflow responsibility");

  ciObjArrayKlass* array_klass = ciObjArrayKlass::make(klass);
  // Check that the array_klass object is loaded.
  if (!array_klass->is_loaded()) {
    // Generate an uncommon trap for the unloaded array class.
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  }

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}


void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();

  Node*   count_val = pop();
  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
  Node*   obj = new_array(makecon(array_klass), count_val, 1);
  // Push resultant oop onto stack
  push(obj);
}
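
// Illustrative note (an assumption, not from the original source): these two
// helpers correspond to the single-dimension allocation bytecodes, e.g.
//
//   new String[n];   // anewarray java/lang/String  -> do_anewarray()
//   new int[n];      // newarray T_INT              -> do_newarray(T_INT)
//
// Both compile down to a single new_array() allocation of the popped length.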

// Expand simple expressions like new int[3][5] and new Object[2][nonConLen].
// Also handle the degenerate 1-dimensional case of anewarray.
Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs) {
  Node* length = lengths[0];
  assert(length != NULL, "");
  Node* array = new_array(makecon(TypeKlassPtr::make(array_klass)), length, nargs);
  if (ndimensions > 1) {
    jint length_con = find_int_con(length, -1);
    guarantee(length_con >= 0, "non-constant multianewarray");
    ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
    const TypePtr* adr_type = TypeAryPtr::OOPS;
    const TypeOopPtr*    elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
    const intptr_t header   = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
    for (jint i = 0; i < length_con; i++) {
      Node*    elem   = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
      intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
      Node*    eaddr  = basic_plus_adr(array, offset);
      store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT);
    }
  }
  return array;
}
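
// Illustrative sketch (an assumption, not from the original source): for
//
//   new int[3][5];
//
// the expansion above allocates one length-3 outer array, then recursively
// allocates three int[5] sub-arrays and stores each into its slot, as if
// the source had been
//
//   int[][] a = new int[3][];    // outer new_array
//   for (int i = 0; i < 3; i++)  // loop fully unrolled at compile time,
//     a[i] = new int[5];         // since the outer length is a constant
//
// avoiding the multianewarray runtime call emitted further below.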

void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note:  Array classes are always initialized; no is_initialized check.

  enum { MAX_DIMENSION = 5 };
  if (ndimensions > MAX_DIMENSION || ndimensions <= 0) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  kill_dead_locals();

  // Get the lengths from the stack (first dimension is on top).
  Node* length[MAX_DIMENSION+1];
  length[ndimensions] = NULL;  // terminating null for make_runtime_call
  int j;
  for (j = ndimensions-1; j >= 0 ; j--) length[j] = pop();

  // The original expression was of this form: new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
  const jint expand_limit = MIN2((juint)MultiArrayExpandLimit, (juint)100);
  jint expand_count = 1;        // count of allocations in the expansion
  jint expand_fanout = 1;       // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    jint dim_con = find_int_con(length[j], -1);
    expand_fanout *= dim_con;
    expand_count  += expand_fanout; // count the level-J sub-arrays
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }
  }

  // We can expand into a series of fast [a]newarray allocations instead of
  // calling the multianewarray runtime stub if there is only one dimension,
  // or if all non-final dimensions are small constants.
  if (ndimensions == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
    Node* obj = NULL;
    // Set the original stack and the reexecute bit for the interpreter
    // to reexecute the multianewarray bytecode if deoptimization happens.
    // Do it unconditionally, even for a one-dimensional multianewarray.
    // Note: the reexecute bit will be set in GraphKit::add_safepoint_edges()
    // when the AllocateArray node for the newarray is created.
    { PreserveReexecuteState preexecs(this);
      _sp += ndimensions;
      // Pass 0 as nargs since the uncommon trap code does not need to restore the stack.
      obj = expand_multianewarray(array_klass, &length[0], ndimensions, 0);
    } // original reexecute state and sp are restored here
    push(obj);
    return;
  }

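  // Illustrative note (an assumption, not from the original source): this
  // slow path handles shapes the expansion above rejects, e.g. non-constant
  // inner dimensions such as
  //
  //   new Object[a][b];   // a, b not compile-time constants
  //
  // by calling the matching OptoRuntime::multianewarrayN_Java stub, which
  // allocates the whole nest of arrays in the runtime.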
  address fun = NULL;
  switch (ndimensions) {
  //case 1: Actually, there is no case 1.  It's handled by new_array.
  case 2: fun = OptoRuntime::multianewarray2_Java(); break;
  case 3: fun = OptoRuntime::multianewarray3_Java(); break;
  case 4: fun = OptoRuntime::multianewarray4_Java(); break;
  case 5: fun = OptoRuntime::multianewarray5_Java(); break;
  default: ShouldNotReachHere();
  }

  Node* c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                              OptoRuntime::multianewarray_Type(ndimensions),
                              fun, NULL, TypeRawPtr::BOTTOM,
                              makecon(TypeKlassPtr::make(array_klass)),
                              length[0], length[1], length[2],
                              length[3], length[4]);
  Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms));

  const Type* type = TypeOopPtr::make_from_klass_raw(array_klass);

  // Improve the type:  We know it's not null, exact, and of a given length.
  type = type->is_ptr()->cast_to_ptr_type(TypePtr::NotNull);
  type = type->is_aryptr()->cast_to_exactness(true);

  const TypeInt* ltype = _gvn.find_int_type(length[0]);
  if (ltype != NULL)
    type = type->is_aryptr()->cast_to_size(ltype);

  // We cannot sharpen the nested sub-arrays, since the top level is mutable.

  Node* cast = _gvn.transform( new (C, 2) CheckCastPPNode(control(), res, type) );
  push(cast);

  // Possible improvements:
  // - Make a fast path for small multi-arrays.  (W/ implicit init. loops.)
  // - Issue CastII against length[*] values, to TypeInt::POS.
}