/*
 * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/memnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/valuetypenode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================
bool Parse::static_field_ok_in_clinit(ciField *field, ciMethod *method) {
  // Could be the field_holder's <clinit> method, or <clinit> for a subklass.
  // Better to check now than to Deoptimize as soon as we execute.
  assert(field->is_static(), "Only check if field is static");
  // is_being_initialized() is too generous.  It allows access to statics
  // by threads that are not running the <clinit> before the <clinit> finishes.
  // return field->holder()->is_being_initialized();

  // The following restriction is correct but conservative.
  // It is also desirable to allow compilation of methods called from <clinit>
  // but this generated code will need to be made safe for execution by
  // other threads, or the transition from interpreted to compiled code would
  // need to be guarded.
  ciInstanceKlass *field_holder = field->holder();

  bool access_OK = false;
  if (method->holder()->is_subclass_of(field_holder)) {
    if (method->is_static()) {
      if (method->name() == ciSymbol::class_initializer_name()) {
        // OK to access static fields inside initializer
        access_OK = true;
      }
    } else {
      if (method->name() == ciSymbol::object_initializer_name()) {
        // It's also OK to access static fields inside a constructor,
        // because any thread calling the constructor must first have
        // synchronized on the class by executing a '_new' bytecode.
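        //
        // For example (illustrative sketch): in
        //
        //   class C {
        //     static int X;
        //     C() { X = 2; }  // compiled access is safe even if C.<clinit>
        //   }                 // has not finished, because 'new C()' has
        //                     // already synchronized on C's initialization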
        access_OK = true;
      }
    }
  }

  return access_OK;
}


void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "getfield: typeflow responsibility");

  ciInstanceKlass* field_holder = field->holder();

  if (is_field == field->is_static()) {
    // Interpreter will throw java_lang_IncompatibleClassChangeError.
    // Check this before allowing <clinit> methods to access static fields.
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  if (!is_field && !field_holder->is_initialized()) {
    if (!static_field_ok_in_clinit(field, method())) {
      uncommon_trap(Deoptimization::Reason_uninitialized,
                    Deoptimization::Action_reinterpret,
                    NULL, "!static_field_ok_in_clinit");
      return;
    }
  }

  // Deoptimize on putfield writes to call site target field.
  if (!is_get && field->is_call_site_target()) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_reinterpret,
                  NULL, "put to call site target field");
    return;
  }

  assert(field->will_link(method()->holder(), bc()), "getfield: typeflow responsibility");

  // Note:  We do not check for an unloaded field type here any more.

  // Generate code for the object pointer.
  Node* obj;
  if (is_field) {
    int obj_depth = is_get ? 0 : field->type()->size();
    obj = null_check(peek(obj_depth));
    // Compile-time detection of a null exception?
    if (stopped())  return;

#ifdef ASSERT
    const TypeInstPtr *tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
    assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");
#endif

    if (is_get) {
      (void) pop();  // pop receiver before getting
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
      (void) pop();  // pop receiver after putting
    }
  } else {
    const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
    obj = _gvn.makecon(tip);
    if (is_get) {
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
    }
  }
}

void Parse::do_vgetfield() {
  // FIXME: null/top check?
  bool will_link;
  ciField* field = iter().get_field(will_link);
  BasicType bt = field->layout_type();
  ValueTypeNode* vt = pop()->as_ValueType();
  Node* value = vt->field_value_by_offset(field->offset());
  push_node(bt, value);
}

void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
  BasicType bt = field->layout_type();
  // Does this field have a constant value?  If so, just push the value.
  if (field->is_constant()) {
    // final or stable field
    const Type* con_type = Type::make_constant(field, obj);
    if (con_type != NULL) {
      Node* con = makecon(con_type);
      if (bt == T_VALUETYPE) {
        // Load value type from constant oop
        con = ValueTypeNode::make(gvn(), map()->memory(), con);
      }
      push_node(con_type->basic_type(), con);
      return;
    }
  }

  ciType* field_klass = field->type();
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
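  // The field sits at a fixed offset in its holder (for statics, in the
  // java.lang.Class mirror passed in as 'obj'), and C->alias_type(field)
  // gives the field its own memory slice so the load built below cannot be
  // confused with accesses to unrelated fields.  For example (illustrative
  // offset): an int field at offset 12 is loaded from address (obj + 12).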
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node *adr = basic_plus_adr(obj, obj, offset);

  // Build the resultant type of the load
  const Type *type;

  bool must_assert_null = false;

  if (bt == T_OBJECT || bt == T_VALUETYPE) {
    if (!field->type()->is_loaded()) {
      type = TypeInstPtr::BOTTOM;
      must_assert_null = true;
    } else if (field->is_constant() && field->is_static()) {
      // This can happen if the constant oop is non-perm.
      ciObject* con = field->constant_value().as_object();
      // Do not "join" in the previous type; it doesn't add value,
      // and may yield a vacuous result if the field is of interface type.
      type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
      assert(type != NULL, "field singleton type must be consistent");
    } else {
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    }
  } else {
    type = Type::get_const_basic_type(bt);
  }
  if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
    insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
  }

  // Build the load.
  //
  MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
  bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
  Node* ld = NULL;
  if (bt == T_VALUETYPE && !field->is_static()) {
    // Load flattened value type from non-static field
    ld = ValueTypeNode::make(_gvn, field_klass->as_value_klass(), map()->memory(), field->holder(), obj, offset);
  } else {
    ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, needs_atomic_access);
  }

  // Adjust Java stack
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here.  It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever.  Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations.  (Or, we might load a class
    // which should not be loaded.)  If we ever see a non-null
    // value, we will then trap and recompile.  (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field->type()));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    null_assert(peek());
    set_bci(iter().cur_bci()); // put it back
  }

  // If reference is volatile, prevent following memory ops from
  // floating up past the volatile read.  Also prevents commoning
  // another volatile read.
  if (field->is_volatile()) {
    // Memory barrier includes bogus read of value to force load BEFORE membar
    insert_mem_bar(Op_MemBarAcquire, ld);
  }
}

void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
  bool is_vol = field->is_volatile();
  // If reference is volatile, prevent following memory ops from
  // floating down past the volatile write.  Also prevents commoning
  // another volatile read.
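  //
  // Sketch of the barrier sequence built for a volatile putfield on a
  // multiple-copy-atomic CPU (on IRIW-sensitive CPUs such as ppc64 the
  // trailing StoreLoad barrier is issued before volatile loads instead,
  // see do_get_xxx above):
  //
  //   MemBarRelease
  //   Store (MemNode::release)
  //   MemBarVolatile   // StoreLoad barrier, at the end of this method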
  if (is_vol)  insert_mem_bar(Op_MemBarRelease);

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();
  // Value to be stored
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();
  // Round doubles before storing
  if (bt == T_DOUBLE)  val = dstore_rounding(val);

  // Conservatively release stores of object references.
  const MemNode::MemOrd mo =
    is_vol ?
    // Volatile fields need releasing stores.
    MemNode::release :
    // Non-volatile fields also need releasing stores if they hold an
    // object reference, because the object reference might point to
    // a freshly created object.
    StoreNode::release_if_reference(bt);

  // Store the value.
  if (bt == T_OBJECT || bt == T_VALUETYPE) {
    const TypeOopPtr* field_type;
    if (!field->type()->is_loaded()) {
      field_type = TypeInstPtr::BOTTOM;
    } else {
      field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
    }
    if (bt == T_VALUETYPE && !field->is_static()) {
      // Store flattened value type to non-static field
      val->as_ValueType()->store_to_field(this, field->holder(), obj, offset);
    } else {
      store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
    }
  } else {
    bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
    store_to_memory(control(), adr, val, bt, adr_type, mo, needs_atomic_access);
  }

  // If reference is volatile, prevent following volatile ops from
  // floating up before the volatile write.
  if (is_vol) {
    // If not multiple copy atomic, we do the MemBarVolatile before the load.
    if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
      insert_mem_bar(Op_MemBarVolatile); // Use fat membar
    }
    // Remember we wrote a volatile field.
    // For not multiple copy atomic cpu (ppc64) a barrier should be issued
    // in constructors which have such stores.  See do_exits() in parse1.cpp.
    if (is_field) {
      set_wrote_volatile(true);
    }
  }

  if (is_field) {
    set_wrote_fields(true);
  }

  // If the field is final, the rules of Java say we are in <init> or <clinit>.
  // Note the presence of writes to final non-static fields, so that we
  // can insert a memory barrier later on to keep the writes from floating
  // out of the constructor.
  // Any method can write a @Stable field; insert memory barriers after those also.
  if (is_field && (field->is_final() || field->is_stable())) {
    if (field->is_final()) {
      set_wrote_final(true);
    }
    if (field->is_stable()) {
      set_wrote_stable(true);
    }

    // Preserve allocation ptr to create precedent edge to it in membar
    // generated on exit from constructor.
    // Can't bind stable with its allocation, only record allocation for final field.
    if (field->is_final() && AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) {
      set_alloc_with_final(obj);
    }
  }
}

//=============================================================================
void Parse::do_anewarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Uncommon Trap when the class that the array contains is not loaded.
  // We need the loaded class for the rest of the graph, but we must not
  // initialize the container class (see the Java spec)!!!
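  // For example (illustrative): 'new Foo[len]' needs Foo to be loaded so
  // that the Foo[] klass can be created, but it must not run Foo.<clinit>;
  // anewarray never initializes the element class.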
  assert(will_link, "anewarray: typeflow responsibility");

  ciObjArrayKlass* array_klass = ciObjArrayKlass::make(klass);
  // Check that array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate uncommon_trap for unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  }

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}


void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();

  Node* count_val = pop();
  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
  Node* obj = new_array(makecon(array_klass), count_val, 1);
  // Push resultant oop onto stack
  push(obj);
}

// Expand simple expressions like new int[3][5] and new Object[2][nonConLen].
// Also handle the degenerate 1-dimensional case of anewarray.
Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs) {
  Node* length = lengths[0];
  assert(length != NULL, "");
  Node* array = new_array(makecon(TypeKlassPtr::make(array_klass)), length, nargs);
  if (ndimensions > 1) {
    jint length_con = find_int_con(length, -1);
    guarantee(length_con >= 0, "non-constant multianewarray");
    ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
    const TypePtr* adr_type = TypeAryPtr::OOPS;
    const TypeOopPtr* elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
    const intptr_t header = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
    for (jint i = 0; i < length_con; i++) {
      Node* elem = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
      intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
      Node* eaddr = basic_plus_adr(array, offset);
      store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT, MemNode::unordered);
    }
  }
  return array;
}

void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note:  Array classes are always initialized; no is_initialized check.

  kill_dead_locals();

  // Get the lengths from the stack (first dimension is on top).
  Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
  length[ndimensions] = NULL;  // terminating null for make_runtime_call
  int j;
  for (j = ndimensions-1; j >= 0; j--)  length[j] = pop();

  // The original expression was of this form:  new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
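  // For example (illustrative): in 'new int[2][n]' the outer length is the
  // constant 2, so the loop below computes expand_count = 1 + 2 = 3 and the
  // bytecode is expanded into three fast 1-d allocations: one length-2
  // array of int[] plus two int[n] sub-arrays.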
  const jint expand_limit = MIN2((jint)MultiArrayExpandLimit, 100);
  jint expand_count = 1;        // count of allocations in the expansion
  jint expand_fanout = 1;       // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    jint dim_con = find_int_con(length[j], -1);
    expand_fanout *= dim_con;
    expand_count  += expand_fanout; // count the level-J sub-arrays
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }
  }

  // Can use multianewarray instead of [a]newarray if only one dimension,
  // or if all non-final dimensions are small constants.
  if (ndimensions == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
    Node* obj = NULL;
    // Set the original stack and the reexecute bit for the interpreter
    // to reexecute the multianewarray bytecode if deoptimization happens.
    // Do it unconditionally even for one dimension multianewarray.
    // Note: the reexecute bit will be set in GraphKit::add_safepoint_edges()
    // when AllocateArray node for newarray is created.
    { PreserveReexecuteState preexecs(this);
      inc_sp(ndimensions);
      // Pass 0 as nargs since uncommon trap code does not need to restore stack.
      obj = expand_multianewarray(array_klass, &length[0], ndimensions, 0);
    } // original reexecute and sp are set back here
    push(obj);
    return;
  }

  address fun = NULL;
  switch (ndimensions) {
  case 1: ShouldNotReachHere(); break;
  case 2: fun = OptoRuntime::multianewarray2_Java(); break;
  case 3: fun = OptoRuntime::multianewarray3_Java(); break;
  case 4: fun = OptoRuntime::multianewarray4_Java(); break;
  case 5: fun = OptoRuntime::multianewarray5_Java(); break;
  }
  Node* c = NULL;

  if (fun != NULL) {
    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarray_Type(ndimensions),
                          fun, NULL, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass)),
                          length[0], length[1], length[2],
                          (ndimensions > 2) ? length[3] : NULL,
                          (ndimensions > 3) ? length[4] : NULL);
  } else {
    // Create a java array for dimension sizes
    Node* dims = NULL;
    { PreserveReexecuteState preexecs(this);
      inc_sp(ndimensions);
      Node* dims_array_klass = makecon(TypeKlassPtr::make(ciArrayKlass::make(ciType::make(T_INT))));
      dims = new_array(dims_array_klass, intcon(ndimensions), 0);

      // Fill it in with the dimension values
      for (j = 0; j < ndimensions; j++) {
        Node *dims_elem = array_element_address(dims, intcon(j), T_INT);
        store_to_memory(control(), dims_elem, length[j], T_INT, TypeAryPtr::INTS, MemNode::unordered);
      }
    }

    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarrayN_Type(),
                          OptoRuntime::multianewarrayN_Java(), NULL, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass)),
                          dims);
  }
  make_slow_call_ex(c, env()->Throwable_klass(), false);

  Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms));

  const Type* type = TypeOopPtr::make_from_klass_raw(array_klass);

  // Improve the type:  We know it's not null, exact, and of a given length.
  type = type->is_ptr()->cast_to_ptr_type(TypePtr::NotNull);
  type = type->is_aryptr()->cast_to_exactness(true);

  const TypeInt* ltype = _gvn.find_int_type(length[0]);
  if (ltype != NULL)
    type = type->is_aryptr()->cast_to_size(ltype);

  // We cannot sharpen the nested sub-arrays, since the top level is mutable.
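  // For example (illustrative): a slow-path 'new String[m][n]' is typed
  // below as an exact, non-null String[][] (with m's int range as the
  // length, when known); the String[] elements keep their generic type
  // because a later aastore may replace any of them.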

  Node* cast = _gvn.transform( new CheckCastPPNode(control(), res, type) );
  push(cast);

  // Possible improvements:
  // - Make a fast path for small multi-arrays.  (W/ implicit init. loops.)
  // - Issue CastII against length[*] values, to TypeInt::POS.
}

void Parse::do_vbox() {
  // Obtain a value type from the top of the stack
  ValueTypeNode* vt = pop()->as_ValueType();

  // Obtain types
  bool will_link;
  ciValueKlass* dvt_klass = gvn().type(vt)->is_valuetype()->value_klass();
  ciInstanceKlass* vcc_klass = iter().get_klass(will_link)->as_instance_klass();
  guarantee(will_link, "value-capable class must be loaded");

  kill_dead_locals();

  // TODO: Generate all (or some) of the following checks
  // (1) if target is not a value-capable instance, throw ClassCastException
  // (2) if source is not a value type instance, throw ClassCastException
  // (3) if target type is not a value type derived from source

  // Create the new object
  Node* kls = makecon(TypeKlassPtr::make(vcc_klass));
  Node* obj = new_instance(kls);

  // Store all field values to the newly created object.
  // The code below relies on the assumption that the VCC has the
  // same memory layout as the derived value type.
  // TODO: Once the layout of the two is not the same, update the code below.
  vt->store_values(this, vcc_klass, obj);

  // Push the new object onto the stack
  push(obj);
}

void Parse::do_vunbox() {
  // Obtain the object from the top of the stack
  Node* obj = pop();

  // Obtain types
  bool will_link;
  ciInstanceKlass* vcc_klass = gvn().type(obj)->is_oopptr()->klass()->as_instance_klass();
  ciValueKlass* dvt_klass = iter().get_klass(will_link)->as_value_klass();
  guarantee(will_link, "derived value type must be loaded");

  // TODO: Generate all the checks.  Similar to vbox.

  // Create a value type node with the corresponding type
  Node* vt = ValueTypeNode::make(gvn(), dvt_klass, map()->memory(), vcc_klass, obj, dvt_klass->first_field_offset());

  // Push the value type onto the stack
  push(vt);
}
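
// Note on do_vbox/do_vunbox (illustrative sketch): given a value-capable
// class VCC with fields (int x, int y) and its derived value type DVT,
// do_vbox copies x and y from the DVT into a freshly allocated VCC instance,
// while do_vunbox reads them back out of a VCC instance starting at
// dvt_klass->first_field_offset().  Both directions rely on the VCC and the
// DVT sharing one field layout, as noted in the TODOs above.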