/*
 * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/memnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================
bool Parse::static_field_ok_in_clinit(ciField *field, ciMethod *method) {
  // Could be the field_holder's <clinit> method, or <clinit> for a subklass.
  // Better to check now than to Deoptimize as soon as we execute
  assert(field->is_static(), "Only check if field is static");
  // is_being_initialized() is too generous.  It allows access to statics
  // by threads that are not running the <clinit> before the <clinit> finishes.
  // return field->holder()->is_being_initialized();

  // The following restriction is correct but conservative.
  // It is also desirable to allow compilation of methods called from <clinit>
  // but this generated code will need to be made safe for execution by
  // other threads, or the transition from interpreted to compiled code would
  // need to be guarded.
  ciInstanceKlass *field_holder = field->holder();

  bool access_OK = false;
  if (method->holder()->is_subclass_of(field_holder)) {
    if (method->is_static()) {
      if (method->name() == ciSymbol::class_initializer_name()) {
        // OK to access static fields inside initializer
        access_OK = true;
      }
    } else {
      if (method->name() == ciSymbol::object_initializer_name()) {
        // It's also OK to access static fields inside a constructor,
        // because any thread calling the constructor must first have
        // synchronized on the class by executing a '_new' bytecode.
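        // (Executing '_new' triggers class initialization, which takes the
        // class's init lock; so by the time <init> runs, the holder is either
        // fully initialized or being initialized by the current thread.)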
        access_OK = true;
      }
    }
  }

  return access_OK;

}


void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "getfield: typeflow responsibility");

  ciInstanceKlass* field_holder = field->holder();

  if (is_field == field->is_static()) {
    // Interpreter will throw java_lang_IncompatibleClassChangeError
    // Check this before allowing <clinit> methods to access static fields
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  if (!is_field && !field_holder->is_initialized()) {
    if (!static_field_ok_in_clinit(field, method())) {
      uncommon_trap(Deoptimization::Reason_uninitialized,
                    Deoptimization::Action_reinterpret,
                    NULL, "!static_field_ok_in_clinit");
      return;
    }
  }

  // Deoptimize on putfield writes to call site target field.
  if (!is_get && field->is_call_site_target()) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_reinterpret,
                  NULL, "put to call site target field");
    return;
  }

  assert(field->will_link(method()->holder(), bc()), "getfield: typeflow responsibility");

  // Note:  We do not check for an unloaded field type here any more.

  // Generate code for the object pointer.
  Node* obj;
  if (is_field) {
    int obj_depth = is_get ? 0 : field->type()->size();
    obj = null_check(peek(obj_depth));
    // Compile-time detection of a null exception?
    if (stopped())  return;

#ifdef ASSERT
    const TypeInstPtr *tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
    assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");
#endif

    if (is_get) {
      (void) pop();  // pop receiver before getting
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
      (void) pop();  // pop receiver after putting
    }
  } else {
    const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
    obj = _gvn.makecon(tip);
    if (is_get) {
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
    }
  }
}


void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
  // Does this field have a constant value?  If so, just push the value.
  if (field->is_constant()) {
    // final or stable field
    const Type* stable_type = NULL;
    if (FoldStableValues && field->is_stable()) {
      stable_type = Type::get_const_type(field->type());
      if (field->type()->is_array_klass()) {
        int stable_dimension = field->type()->as_array_klass()->dimension();
        stable_type = stable_type->is_aryptr()->cast_to_stable(true, stable_dimension);
      }
    }
    if (field->is_static()) {
      // final static field
      if (C->eliminate_boxing()) {
        // The pointers in the autobox arrays are always non-null.
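        // (These are the static 'cache' arrays of java.lang.Integer$IntegerCache
        // and the analogous Byte/Short/Character/Long caches; Integer's cache
        // covers at least the range [-128, 127].)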
        ciSymbol* klass_name = field->holder()->name();
        if (field->name() == ciSymbol::cache_field_name() &&
            field->holder()->uses_default_loader() &&
            (klass_name == ciSymbol::java_lang_Character_CharacterCache() ||
             klass_name == ciSymbol::java_lang_Byte_ByteCache() ||
             klass_name == ciSymbol::java_lang_Short_ShortCache() ||
             klass_name == ciSymbol::java_lang_Integer_IntegerCache() ||
             klass_name == ciSymbol::java_lang_Long_LongCache())) {
          bool require_const = true;
          bool autobox_cache = true;
          if (push_constant(field->constant_value(), require_const, autobox_cache)) {
            return;
          }
        }
      }
      if (push_constant(field->constant_value(), false, false, stable_type))
        return;
    } else {
      // final or stable non-static field
      // Treat final non-static fields of trusted classes (classes in
      // java.lang.invoke and sun.invoke packages and subpackages) as
      // compile time constants.
      if (obj->is_Con()) {
        const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr();
        ciObject* constant_oop = oop_ptr->const_oop();
        ciConstant constant = field->constant_value_of(constant_oop);
        if (FoldStableValues && field->is_stable() && constant.is_null_or_zero()) {
          // fall through to field load; the field is not yet initialized
        } else {
          if (push_constant(constant, true, false, stable_type))
            return;
        }
      }
    }
  }

  Node* leading_membar = NULL;
  ciType* field_klass = field->type();
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node *adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();

  // Build the resultant type of the load
  const Type *type;

  bool must_assert_null = false;

  if (bt == T_OBJECT) {
    if (!field->type()->is_loaded()) {
      type = TypeInstPtr::BOTTOM;
      must_assert_null = true;
    } else if (field->is_constant() && field->is_static()) {
      // This can happen if the constant oop is non-perm.
      ciObject* con = field->constant_value().as_object();
      // Do not "join" in the previous type; it doesn't add value,
      // and may yield a vacuous result if the field is of interface type.
      type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
      assert(type != NULL, "field singleton type must be consistent");
    } else {
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    }
  } else {
    type = Type::get_const_basic_type(bt);
  }
  if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
    leading_membar = insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
  }
  // Build the load.
  //
  MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
  Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);

  // Adjust Java stack
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here.  It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever.  Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations.  (Or, we might load a class
    // which should not be loaded.)  If we ever see a non-null
    // value, we will then trap and recompile.  (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
#endif
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field->type()));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    null_assert(peek());
    set_bci(iter().cur_bci()); // put it back
  }

  // If reference is volatile, prevent following memory ops from
  // floating up past the volatile read.  Also prevents commoning
  // another volatile read.
  if (field->is_volatile()) {
    // Memory barrier includes bogus read of value to force load BEFORE membar
    assert(leading_membar == NULL || support_IRIW_for_not_multiple_copy_atomic_cpu, "no leading membar expected");
    Node* mb = insert_mem_bar(Op_MemBarAcquire, ld);
    mb->as_MemBar()->set_trailing_load();
  }
}

void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
  Node* leading_membar = NULL;
  bool is_vol = field->is_volatile();
  // If reference is volatile, prevent following memory ops from
  // floating down past the volatile write.  Also prevents commoning
  // another volatile read.
  if (is_vol) {
    leading_membar = insert_mem_bar(Op_MemBarRelease);
  }

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();
  // Value to be stored
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();
  // Round doubles before storing
  if (bt == T_DOUBLE)  val = dstore_rounding(val);

  // Conservatively release stores of object references.
  const MemNode::MemOrd mo =
    is_vol ?
    // Volatile fields need releasing stores.
    MemNode::release :
    // Non-volatile fields also need releasing stores if they hold an
    // object reference, because the object reference might point to
    // a freshly created object.
    StoreNode::release_if_reference(bt);

  // Store the value.
  Node* store;
  if (bt == T_OBJECT) {
    const TypeOopPtr* field_type;
    if (!field->type()->is_loaded()) {
      field_type = TypeInstPtr::BOTTOM;
    } else {
      field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
    }
    store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
  } else {
    store = store_to_memory(control(), adr, val, bt, adr_type, mo, is_vol);
  }

  // If reference is volatile, prevent following volatile ops from
  // floating up before the volatile write.
  if (is_vol) {
    // If not multiple copy atomic, we do the MemBarVolatile before the load.
    if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
      Node* mb = insert_mem_bar(Op_MemBarVolatile, store);   // Use fat membar
      MemBarNode::set_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());
    }
    // Remember we wrote a volatile field.
    // For not multiple copy atomic cpu (ppc64) a barrier should be issued
    // in constructors which have such stores.  See do_exits() in parse1.cpp.
    if (is_field) {
      set_wrote_volatile(true);
    }
  }

  // If the field is final, the rules of Java say we are in <init> or <clinit>.
  // Note the presence of writes to final non-static fields, so that we
  // can insert a memory barrier later on to keep the writes from floating
  // out of the constructor.
  // Any method can write a @Stable field; insert memory barriers after those also.
  if (is_field && (field->is_final() || field->is_stable())) {
    set_wrote_final(true);
    // Preserve allocation ptr to create precedent edge to it in membar
    // generated on exit from constructor.
    if (C->eliminate_boxing() &&
        adr_type->isa_oopptr() && adr_type->is_oopptr()->is_ptr_to_boxed_value() &&
        AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) {
      set_alloc_with_final(obj);
    }
  }
}


bool Parse::push_constant(ciConstant constant, bool require_constant, bool is_autobox_cache, const Type* stable_type) {
  const Type* con_type = Type::make_from_constant(constant, require_constant, is_autobox_cache);
  switch (constant.basic_type()) {
  case T_ARRAY:
  case T_OBJECT:
    // cases:
    //   can_be_constant    = (oop not scavengable || ScavengeRootsInCode != 0)
    //   should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
    // An oop is not scavengable if it is in the perm gen.
    if (stable_type != NULL && con_type != NULL && con_type->isa_oopptr())
      con_type = con_type->join_speculative(stable_type);
    break;

  case T_ILLEGAL:
    // Invalid ciConstant returned due to OutOfMemoryError in the CI
    assert(C->env()->failing(), "otherwise should not see this");
    // These always occur because of object types; we are going to
    // bail out anyway, so make the stack depths match up
    push( zerocon(T_OBJECT) );
    return false;
  }

  if (con_type == NULL)
    // we cannot inline the oop, but we can use it later to narrow a type
    return false;

  push_node(constant.basic_type(), makecon(con_type));
  return true;
}


//=============================================================================
void Parse::do_anewarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Uncommon trap when the class that the array contains is not loaded.
  // We need the loaded class for the rest of the graph; do not
  // initialize the container class (see the Java spec)!!!
  assert(will_link, "anewarray: typeflow responsibility");

  ciObjArrayKlass* array_klass = ciObjArrayKlass::make(klass);
  // Check that array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate uncommon_trap for unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  }

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}


void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();

  Node* count_val = pop();
  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
  Node* obj = new_array(makecon(array_klass), count_val, 1);
  // Push resultant oop onto stack
  push(obj);
}

// Expand simple expressions like new int[3][5] and new Object[2][nonConLen].
// Also handle the degenerate 1-dimensional case of anewarray.
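// For example, new int[3][5] becomes one allocation of the outer int[3][]
// array plus three allocations of int[5] sub-arrays, with each sub-array's
// oop stored into the corresponding element of the outer array.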
Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs) {
  Node* length = lengths[0];
  assert(length != NULL, "");
  Node* array = new_array(makecon(TypeKlassPtr::make(array_klass)), length, nargs);
  if (ndimensions > 1) {
    jint length_con = find_int_con(length, -1);
    guarantee(length_con >= 0, "non-constant multianewarray");
    ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
    const TypePtr* adr_type = TypeAryPtr::OOPS;
    const TypeOopPtr* elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
    const intptr_t header = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
    for (jint i = 0; i < length_con; i++) {
      Node* elem = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
      intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
      Node* eaddr = basic_plus_adr(array, offset);
      store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT, MemNode::unordered);
    }
  }
  return array;
}

void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note:  Array classes are always initialized; no is_initialized check.

  kill_dead_locals();

  // get the lengths from the stack (first dimension is on top)
  Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
  length[ndimensions] = NULL;  // terminating null for make_runtime_call
  int j;
  for (j = ndimensions-1; j >= 0; j--) length[j] = pop();

  // The original expression was of this form: new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
  const jint expand_limit = MIN2((juint)MultiArrayExpandLimit, (juint)100);
  jint expand_count = 1;        // count of allocations in the expansion
  jint expand_fanout = 1;       // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    jint dim_con = find_int_con(length[j], -1);
    expand_fanout *= dim_con;
    expand_count  += expand_fanout; // count the level-J sub-arrays
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }
  }

  // Can use [a]newarray instead of multianewarray if only one dimension,
  // or if all non-final dimensions are small constants.
  if (ndimensions == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
    Node* obj = NULL;
    // Set the original stack and the reexecute bit for the interpreter
    // to reexecute the multianewarray bytecode if deoptimization happens.
    // Do it unconditionally even for one dimension multianewarray.
    // Note: the reexecute bit will be set in GraphKit::add_safepoint_edges()
    // when AllocateArray node for newarray is created.
    { PreserveReexecuteState preexecs(this);
      inc_sp(ndimensions);
      // Pass 0 as nargs since uncommon trap code does not need to restore stack.
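      // (The length operands remain on the expression stack via inc_sp() above,
      // and the reexecute bit makes the interpreter rerun the entire bytecode
      // if we deoptimize inside the expansion.)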
      obj = expand_multianewarray(array_klass, &length[0], ndimensions, 0);
    } // original reexecute and sp are set back here
    push(obj);
    return;
  }

  address fun = NULL;
  switch (ndimensions) {
  case 1: ShouldNotReachHere(); break;
  case 2: fun = OptoRuntime::multianewarray2_Java(); break;
  case 3: fun = OptoRuntime::multianewarray3_Java(); break;
  case 4: fun = OptoRuntime::multianewarray4_Java(); break;
  case 5: fun = OptoRuntime::multianewarray5_Java(); break;
  };
  Node* c = NULL;

  if (fun != NULL) {
    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarray_Type(ndimensions),
                          fun, NULL, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass)),
                          length[0], length[1], length[2],
                          (ndimensions > 2) ? length[3] : NULL,
                          (ndimensions > 3) ? length[4] : NULL);
  } else {
    // Create a java array for dimension sizes
    Node* dims = NULL;
    { PreserveReexecuteState preexecs(this);
      inc_sp(ndimensions);
      Node* dims_array_klass = makecon(TypeKlassPtr::make(ciArrayKlass::make(ciType::make(T_INT))));
      dims = new_array(dims_array_klass, intcon(ndimensions), 0);

      // Fill it in with values
      for (j = 0; j < ndimensions; j++) {
        Node *dims_elem = array_element_address(dims, intcon(j), T_INT);
        store_to_memory(control(), dims_elem, length[j], T_INT, TypeAryPtr::INTS, MemNode::unordered);
      }
    }

    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarrayN_Type(),
                          OptoRuntime::multianewarrayN_Java(), NULL, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass)),
                          dims);
  }
  make_slow_call_ex(c, env()->Throwable_klass(), false);

  Node* res = _gvn.transform(new (C) ProjNode(c, TypeFunc::Parms));

  const Type* type = TypeOopPtr::make_from_klass_raw(array_klass);

  // Improve the type:  We know it's not null, exact, and of a given length.
  type = type->is_ptr()->cast_to_ptr_type(TypePtr::NotNull);
  type = type->is_aryptr()->cast_to_exactness(true);

  const TypeInt* ltype = _gvn.find_int_type(length[0]);
  if (ltype != NULL)
    type = type->is_aryptr()->cast_to_size(ltype);

  // We cannot sharpen the nested sub-arrays, since the top level is mutable.

  Node* cast = _gvn.transform( new (C) CheckCastPPNode(control(), res, type) );
  push(cast);

  // Possible improvements:
  // - Make a fast path for small multi-arrays.  (W/ implicit init. loops.)
  // - Issue CastII against length[*] values, to TypeInt::POS.
}