/*
 * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/memnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================
bool Parse::static_field_ok_in_clinit(ciField *field, ciMethod *method) {
  // Could be the field_holder's <clinit> method, or <clinit> for a subklass.
  // Better to check now than to Deoptimize as soon as we execute
  assert(field->is_static(), "Only check if field is static");
  // is_being_initialized() is too generous. It allows access to statics
  // by threads that are not running the <clinit> before the <clinit> finishes.
  // return field->holder()->is_being_initialized();

  // The following restriction is correct but conservative.
  // It is also desirable to allow compilation of methods called from <clinit>
  // but this generated code will need to be made safe for execution by
  // other threads, or the transition from interpreted to compiled code would
  // need to be guarded.
  ciInstanceKlass *field_holder = field->holder();

  bool access_OK = false;
  if (method->holder()->is_subclass_of(field_holder)) {
    if (method->is_static()) {
      if (method->name() == ciSymbol::class_initializer_name()) {
        // OK to access static fields inside initializer
        access_OK = true;
      }
    } else {
      if (method->name() == ciSymbol::object_initializer_name()) {
        // It's also OK to access static fields inside a constructor,
        // because any thread calling the constructor must first have
        // synchronized on the class by executing a '_new' bytecode.
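        // Illustrative sketch (not from the original source): given
        //   class C { static int x; C() { x = 1; } }
        // the '_new' that precedes any call to C.<init> has already
        // triggered (or joined) C's initialization, so this static
        // access cannot observe an uninitialized holder.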
        access_OK = true;
      }
    }
  }

  return access_OK;

}


void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "getfield: typeflow responsibility");

  ciInstanceKlass* field_holder = field->holder();

  if (is_field == field->is_static()) {
    // Interpreter will throw java_lang_IncompatibleClassChangeError
    // Check this before allowing <clinit> methods to access static fields
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  if (!is_field && !field_holder->is_initialized()) {
    if (!static_field_ok_in_clinit(field, method())) {
      uncommon_trap(Deoptimization::Reason_uninitialized,
                    Deoptimization::Action_reinterpret,
                    NULL, "!static_field_ok_in_clinit");
      return;
    }
  }

  assert(field->will_link(method()->holder(), bc()), "getfield: typeflow responsibility");

  // Note: We do not check for an unloaded field type here any more.

  // Generate code for the object pointer.
  Node* obj;
  if (is_field) {
    int obj_depth = is_get ? 0 : field->type()->size();
    obj = do_null_check(peek(obj_depth), T_OBJECT);
    // Compile-time detect of null-exception?
    if (stopped())  return;

#ifdef ASSERT
    const TypeInstPtr *tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
    assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");
#endif

    if (is_get) {
      --_sp;  // pop receiver before getting
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
      --_sp;  // pop receiver after putting
    }
  } else {
    const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
    obj = _gvn.makecon(tip);
    if (is_get) {
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
    }
  }
}


void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
  // Does this field have a constant value? If so, just push the value.
  if (field->is_constant()) {
    if (field->is_static()) {
      // final static field
      if (push_constant(field->constant_value()))
        return;
    }
    else {
      // final non-static field of a trusted class (classes in
      // java.lang.invoke and sun.invoke packages and subpackages).
      if (obj->is_Con()) {
        const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr();
        ciObject* constant_oop = oop_ptr->const_oop();
        ciConstant constant = field->constant_value_of(constant_oop);

        if (push_constant(constant, true))
          return;
      }
    }
  }

  ciType* field_klass = field->type();
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node *adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();

  // Build the resultant type of the load
  const Type *type;

  bool must_assert_null = false;

  if( bt == T_OBJECT ) {
    if (!field->type()->is_loaded()) {
      type = TypeInstPtr::BOTTOM;
      must_assert_null = true;
    } else if (field->is_constant() && field->is_static()) {
      // This can happen if the constant oop is non-perm.
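      // Illustrative sketch (not in the original): for a field like
      //   static final String GREETING = "hi";
      // the load folds to the constant oop and the result is typed as
      // that exact singleton via make_from_constant() below.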
      ciObject* con = field->constant_value().as_object();
      // Do not "join" in the previous type; it doesn't add value,
      // and may yield a vacuous result if the field is of interface type.
      type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
      assert(type != NULL, "field singleton type must be consistent");
    } else {
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    }
  } else {
    type = Type::get_const_basic_type(bt);
  }
  // Build the load.
  Node* ld = make_load(NULL, adr, type, bt, adr_type, is_vol);

  // Adjust Java stack
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here. It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever. Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations. (Or, we might load a class
    // which should not be loaded.) If we ever see a non-null
    // value, we will then trap and recompile. (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
#endif
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field->type()));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    do_null_assert(peek(), T_OBJECT);
    set_bci(iter().cur_bci()); // put it back
  }

  // If reference is volatile, prevent following memory ops from
  // floating up past the volatile read. Also prevents commoning
  // another volatile read.
  if (field->is_volatile()) {
    // Memory barrier includes bogus read of value to force load BEFORE membar
    insert_mem_bar(Op_MemBarAcquire, ld);
  }
}

void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
  bool is_vol = field->is_volatile();
  // If reference is volatile, prevent following memory ops from
  // floating down past the volatile write. Also prevents commoning
  // another volatile read.
  if (is_vol)  insert_mem_bar(Op_MemBarRelease);

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();
  // Value to be stored
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();
  // Round doubles before storing
  if (bt == T_DOUBLE)  val = dstore_rounding(val);

  // Store the value.
  Node* store;
  if (bt == T_OBJECT) {
    const TypeOopPtr* field_type;
    if (!field->type()->is_loaded()) {
      field_type = TypeInstPtr::BOTTOM;
    } else {
      field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
    }
    store = store_oop_to_object( control(), obj, adr, adr_type, val, field_type, bt);
  } else {
    store = store_to_memory( control(), adr, val, bt, adr_type, is_vol );
  }

  // If reference is volatile, prevent following volatile ops from
  // floating up before the volatile write.
  if (is_vol) {
    // First place the specific membar for THIS volatile index. This first
    // membar is dependent on the store, keeping any other membars generated
    // below from floating up past the store.
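    // Illustrative barrier shape (sketch, not in the original) for
    //   volatile int v;  ...  obj.v = x;
    // as built in this method: MemBarRelease; StoreI; then
    // MemBarVolatile on v's slice, on AliasIdxBot, and on each other
    // known volatile slice.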
    int adr_idx = C->get_alias_index(adr_type);
    insert_mem_bar_volatile(Op_MemBarVolatile, adr_idx, store);

    // Now place a membar for AliasIdxBot for the unknown yet-to-be-parsed
    // volatile alias indices. Skip this if the membar is redundant.
    if (adr_idx != Compile::AliasIdxBot) {
      insert_mem_bar_volatile(Op_MemBarVolatile, Compile::AliasIdxBot, store);
    }

    // Finally, place alias-index-specific membars for each volatile index
    // that isn't the adr_idx membar. Typically there's only 1 or 2.
    for( int i = Compile::AliasIdxRaw; i < C->num_alias_types(); i++ ) {
      if (i != adr_idx && C->alias_type(i)->is_volatile()) {
        insert_mem_bar_volatile(Op_MemBarVolatile, i, store);
      }
    }
  }

  // If the field is final, the rules of Java say we are in <init> or <clinit>.
  // Note the presence of writes to final non-static fields, so that we
  // can insert a memory barrier later on to keep the writes from floating
  // out of the constructor.
  if (is_field && field->is_final()) {
    set_wrote_final(true);
  }
}


bool Parse::push_constant(ciConstant constant, bool require_constant) {
  switch (constant.basic_type()) {
  case T_BOOLEAN:  push( intcon(constant.as_boolean()) ); break;
  case T_INT:      push( intcon(constant.as_int())     ); break;
  case T_CHAR:     push( intcon(constant.as_char())    ); break;
  case T_BYTE:     push( intcon(constant.as_byte())    ); break;
  case T_SHORT:    push( intcon(constant.as_short())   ); break;
  case T_FLOAT:    push( makecon(TypeF::make(constant.as_float())) ); break;
  case T_DOUBLE:   push_pair( makecon(TypeD::make(constant.as_double())) ); break;
  case T_LONG:     push_pair( longcon(constant.as_long()) ); break;
  case T_ARRAY:
  case T_OBJECT: {
    // cases:
    //   can_be_constant    = (oop not scavengable || ScavengeRootsInCode != 0)
    //   should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
    // An oop is not scavengable if it is in the perm gen.
    ciObject* oop_constant = constant.as_object();
    if (oop_constant->is_null_object()) {
      push( zerocon(T_OBJECT) );
      break;
    } else if (require_constant || oop_constant->should_be_constant()) {
      push( makecon(TypeOopPtr::make_from_constant(oop_constant, require_constant)) );
      break;
    } else {
      // we cannot inline the oop, but we can use it later to narrow a type
      return false;
    }
  }
  case T_ILLEGAL: {
    // Invalid ciConstant returned due to OutOfMemoryError in the CI
    assert(C->env()->failing(), "otherwise should not see this");
    // These always occur because of object types; we are going to
    // bail out anyway, so make the stack depths match up
    push( zerocon(T_OBJECT) );
    return false;
  }
  default:
    ShouldNotReachHere();
    return false;
  }

  // success
  return true;
}



//=============================================================================
void Parse::do_anewarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Uncommon Trap when the class the array contains is not loaded;
  // we need the loaded class for the rest of the graph, but do not
  // initialize the container class (see Java spec)!!!
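  // Illustrative (not in the original): for `new Foo[n]`, Foo must be
  // resolved so that ciObjArrayKlass::make(klass) below can succeed,
  // but anewarray must not run Foo.<clinit>.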
  assert(will_link, "anewarray: typeflow responsibility");

  ciObjArrayKlass* array_klass = ciObjArrayKlass::make(klass);
  // Check that array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate uncommon_trap for unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  }

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}


void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();

  Node* count_val = pop();
  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
  Node* obj = new_array(makecon(array_klass), count_val, 1);
  // Push resultant oop onto stack
  push(obj);
}

// Expand simple expressions like new int[3][5] and new Object[2][nonConLen].
// Also handle the degenerate 1-dimensional case of anewarray.
Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs) {
  Node* length = lengths[0];
  assert(length != NULL, "");
  Node* array = new_array(makecon(TypeKlassPtr::make(array_klass)), length, nargs);
  if (ndimensions > 1) {
    jint length_con = find_int_con(length, -1);
    guarantee(length_con >= 0, "non-constant multianewarray");
    ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
    const TypePtr* adr_type = TypeAryPtr::OOPS;
    const TypeOopPtr* elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
    const intptr_t header = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
    for (jint i = 0; i < length_con; i++) {
      Node* elem = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
      intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
      Node* eaddr = basic_plus_adr(array, offset);
      store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT);
    }
  }
  return array;
}

void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note: Array classes are always initialized; no is_initialized check.

  kill_dead_locals();

  // get the lengths from the stack (first dimension is on top)
  Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
  length[ndimensions] = NULL;  // terminating null for make_runtime_call
  int j;
  for (j = ndimensions-1; j >= 0 ; j--) length[j] = pop();

  // The original expression was of this form: new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
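  // Illustrative (not in the original): `new T[3][5]` expands to one
  // length-3 outer allocation plus three length-5 inner allocations,
  // i.e. expand_count == 1 + 3 == 4 in the loop below.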
  const jint expand_limit = MIN2((juint)MultiArrayExpandLimit, (juint)100);
  jint expand_count = 1;        // count of allocations in the expansion
  jint expand_fanout = 1;       // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    jint dim_con = find_int_con(length[j], -1);
    expand_fanout *= dim_con;
    expand_count  += expand_fanout; // count the level-J sub-arrays
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }
  }

  // Can use multianewarray instead of [a]newarray if only one dimension,
  // or if all non-final dimensions are small constants.
  if (ndimensions == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
    Node* obj = NULL;
    // Set the original stack and the reexecute bit for the interpreter
    // to reexecute the multianewarray bytecode if deoptimization happens.
    // Do it unconditionally even for one dimension multianewarray.
    // Note: the reexecute bit will be set in GraphKit::add_safepoint_edges()
    // when AllocateArray node for newarray is created.
    { PreserveReexecuteState preexecs(this);
      _sp += ndimensions;
      // Pass 0 as nargs since uncommon trap code does not need to restore stack.
      obj = expand_multianewarray(array_klass, &length[0], ndimensions, 0);
    } //original reexecute and sp are set back here
    push(obj);
    return;
  }

  address fun = NULL;
  switch (ndimensions) {
  //case 1: Actually, there is no case 1. It's handled by new_array.
  case 2: fun = OptoRuntime::multianewarray2_Java(); break;
  case 3: fun = OptoRuntime::multianewarray3_Java(); break;
  case 4: fun = OptoRuntime::multianewarray4_Java(); break;
  case 5: fun = OptoRuntime::multianewarray5_Java(); break;
  };
  Node* c = NULL;

  if (fun != NULL) {
    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarray_Type(ndimensions),
                          fun, NULL, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass)),
                          length[0], length[1], length[2],
                          length[3], length[4]);
  } else {
    // Create a java array for dimension sizes
    Node* dims_array_klass = makecon(TypeKlassPtr::make(ciArrayKlass::make(ciType::make(T_INT))));
    Node* dims = new_array(dims_array_klass, intcon(ndimensions), 0);

    // Fill it in with values
    for (j = 0; j < ndimensions; j++) {
      Node *dims_elem = array_element_address(dims, intcon(j), T_INT);
      store_to_memory(control(), dims_elem, length[j], T_INT, TypeAryPtr::INTS);
    }

    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarrayN_Type(),
                          OptoRuntime::multianewarrayN_Java(), NULL, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass)),
                          dims);
  }

  Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms));

  const Type* type = TypeOopPtr::make_from_klass_raw(array_klass);

  // Improve the type: We know it's not null, exact, and of a given length.
  type = type->is_ptr()->cast_to_ptr_type(TypePtr::NotNull);
  type = type->is_aryptr()->cast_to_exactness(true);

  const TypeInt* ltype = _gvn.find_int_type(length[0]);
  if (ltype != NULL)
    type = type->is_aryptr()->cast_to_size(ltype);

  // We cannot sharpen the nested sub-arrays, since the top level is mutable.

  Node* cast = _gvn.transform( new (C, 2) CheckCastPPNode(control(), res, type) );
  push(cast);

  // Possible improvements:
  // - Make a fast path for small multi-arrays. (W/ implicit init. loops.)
  // - Issue CastII against length[*] values, to TypeInt::POS.
}