/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/memnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================
bool Parse::static_field_ok_in_clinit(ciField *field, ciMethod *method) {
  // Could be the field_holder's <clinit> method, or <clinit> for a subklass.
  // Better to check now than to Deoptimize as soon as we execute
  assert( field->is_static(), "Only check if field is static");
  // is_being_initialized() is too generous. It allows access to statics
  // by threads that are not running the <clinit> before the <clinit> finishes.
  // return field->holder()->is_being_initialized();

  // The following restriction is correct but conservative.
  // It is also desirable to allow compilation of methods called from <clinit>
  // but this generated code will need to be made safe for execution by
  // other threads, or the transition from interpreted to compiled code would
  // need to be guarded.
  ciInstanceKlass *field_holder = field->holder();

  bool access_OK = false;
  if (method->holder()->is_subclass_of(field_holder)) {
    if (method->is_static()) {
      if (method->name() == ciSymbol::class_initializer_name()) {
        // OK to access static fields inside initializer
        access_OK = true;
      }
    } else {
      if (method->name() == ciSymbol::object_initializer_name()) {
        // It's also OK to access static fields inside a constructor,
        // because any thread calling the constructor must first have
        // synchronized on the class by executing a '_new' bytecode.
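        // Illustrative example (Java source, derived from the checks above):
        //   class A {
        //     static int S;
        //     static { S = 1; }    // putstatic in <clinit>: access_OK
        //     A() { int v = S; }   // getstatic in <init>:   access_OK
        //   }
        // Accesses from any other method leave access_OK false.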
        access_OK = true;
      }
    }
  }

  return access_OK;

}


void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "getfield: typeflow responsibility");

  ciInstanceKlass* field_holder = field->holder();

  if (is_field == field->is_static()) {
    // Interpreter will throw java_lang_IncompatibleClassChangeError
    // Check this before allowing <clinit> methods to access static fields
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  if (!is_field && !field_holder->is_initialized()) {
    if (!static_field_ok_in_clinit(field, method())) {
      uncommon_trap(Deoptimization::Reason_uninitialized,
                    Deoptimization::Action_reinterpret,
                    NULL, "!static_field_ok_in_clinit");
      return;
    }
  }

  // Deoptimize on putfield writes to call site target field.
  if (!is_get && field->is_call_site_target()) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_reinterpret,
                  NULL, "put to call site target field");
    return;
  }

  assert(field->will_link(method(), bc()), "getfield: typeflow responsibility");

  // Note: We do not check for an unloaded field type here any more.

  // Generate code for the object pointer.
  Node* obj;
  if (is_field) {
    int obj_depth = is_get ? 0 : field->type()->size();
    obj = null_check(peek(obj_depth));
    // Compile-time detect of null-exception?
    if (stopped())  return;

#ifdef ASSERT
    const TypeInstPtr *tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
    assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");
#endif

    if (is_get) {
      (void) pop();  // pop receiver before getting
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
      (void) pop();  // pop receiver after putting
    }
  } else {
    const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
    obj = _gvn.makecon(tip);
    if (is_get) {
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
    }
  }
}


void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
  BasicType bt = field->layout_type();

  // Does this field have a constant value?  If so, just push the value.
  if (field->is_constant() &&
      // Keep consistent with types found by ciTypeFlow: for an
      // unloaded field type, ciTypeFlow::StateVector::do_getstatic()
      // speculates the field is null. The code in the rest of this
      // method does the same. We must not bypass it and use a non
      // null constant here.
      (bt != T_OBJECT || field->type()->is_loaded())) {
    // final or stable field
    Node* con = make_constant_from_field(field, obj);
    if (con != NULL) {
      push_node(field->layout_type(), con);
      return;
    }
  }

  ciType* field_klass = field->type();
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
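  // The address is (obj + offset): obj is the null-checked receiver for an
  // instance field, or the constant java mirror (the java.lang.Class
  // instance) of the field holder for a static field (see do_field_access).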
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node *adr = basic_plus_adr(obj, obj, offset);

  // Build the resultant type of the load
  const Type *type;

  bool must_assert_null = false;

  if( bt == T_OBJECT ) {
    if (!field->type()->is_loaded()) {
      type = TypeInstPtr::BOTTOM;
      must_assert_null = true;
    } else if (field->is_static_constant()) {
      // This can happen if the constant oop is non-perm.
      ciObject* con = field->constant_value().as_object();
      // Do not "join" in the previous type; it doesn't add value,
      // and may yield a vacuous result if the field is of interface type.
      if (con->is_null_object()) {
        type = TypePtr::NULL_PTR;
      } else {
        type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
      }
      assert(type != NULL, "field singleton type must be consistent");
    } else {
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    }
  } else {
    type = Type::get_const_basic_type(bt);
  }
  if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
    insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
  }
  // Build the load.
  //
  MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
  bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
  Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, needs_atomic_access);

  if (UseZGC && bt == T_OBJECT) {
    ld = load_barrier(ld, adr);
  }

  // Adjust Java stack
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here.  It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever.  Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations.  (Or, we might load a class
    // which should not be loaded.)  If we ever see a non-null
    // value, we will then trap and recompile.  (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field->type()));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    null_assert(peek());
    set_bci(iter().cur_bci()); // put it back
  }

  // If reference is volatile, prevent following memory ops from
  // floating up past the volatile read.  Also prevents commoning
  // another volatile read.
  if (field->is_volatile()) {
    // Memory barrier includes bogus read of value to force load BEFORE membar
    insert_mem_bar(Op_MemBarAcquire, ld);
  }
}

void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
  bool is_vol = field->is_volatile();
  // If reference is volatile, prevent following memory ops from
  // floating down past the volatile write.  Also prevents commoning
  // another volatile read.
  if (is_vol)  insert_mem_bar(Op_MemBarRelease);
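  // A volatile store thus becomes MemBarRelease; store; MemBarVolatile.
  // The trailing MemBarVolatile is emitted after the store below, except on
  // platforms with support_IRIW_for_not_multiple_copy_atomic_cpu, where it
  // is emitted before volatile loads instead (see do_get_xxx).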

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();
  // Value to be stored
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();
  // Round doubles before storing
  if (bt == T_DOUBLE)  val = dstore_rounding(val);

  // Conservatively release stores of object references.
  const MemNode::MemOrd mo =
    is_vol ?
    // Volatile fields need releasing stores.
    MemNode::release :
    // Non-volatile fields also need releasing stores if they hold an
    // object reference, because the object reference might point to
    // a freshly created object.
    StoreNode::release_if_reference(bt);

  // Store the value.
  Node* store;
  if (bt == T_OBJECT) {
    const TypeOopPtr* field_type;
    if (!field->type()->is_loaded()) {
      field_type = TypeInstPtr::BOTTOM;
    } else {
      field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
    }
    store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
  } else {
    bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
    store = store_to_memory(control(), adr, val, bt, adr_type, mo, needs_atomic_access);
  }

  // If reference is volatile, prevent following volatile ops from
  // floating up before the volatile write.
  if (is_vol) {
    // If not multiple copy atomic, we do the MemBarVolatile before the load.
    if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
      insert_mem_bar(Op_MemBarVolatile); // Use fat membar
    }
    // Remember we wrote a volatile field.
    // For a not-multiple-copy-atomic cpu (ppc64) a barrier should be issued
    // in constructors which have such stores. See do_exits() in parse1.cpp.
    if (is_field) {
      set_wrote_volatile(true);
    }
  }

  if (is_field) {
    set_wrote_fields(true);
  }

  // If the field is final, the rules of Java say we are in <init> or <clinit>.
  // Note the presence of writes to final non-static fields, so that we
  // can insert a memory barrier later on to keep the writes from floating
  // out of the constructor.
  // Any method can write a @Stable field; insert memory barriers after those also.
  if (is_field && (field->is_final() || field->is_stable())) {
    if (field->is_final()) {
      set_wrote_final(true);
    }
    if (field->is_stable()) {
      set_wrote_stable(true);
    }

    // Preserve allocation ptr to create precedent edge to it in membar
    // generated on exit from constructor.
    // Can't bind stable with its allocation, only record allocation for final field.
    if (field->is_final() && AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) {
      set_alloc_with_final(obj);
    }
  }
}

//=============================================================================
void Parse::do_anewarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Uncommon trap when the class the array contains is not loaded:
  // we need the loaded class for the rest of the graph, but we must not
  // initialize the container class (see Java spec)!!!
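  // E.g. 'new String[len]' arrives here with klass == String;
  // ciObjArrayKlass::make() below then builds the String[] array klass.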
  assert(will_link, "anewarray: typeflow responsibility");

  ciObjArrayKlass* array_klass = ciObjArrayKlass::make(klass);
  // Check that array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate uncommon_trap for unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  }

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}


void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();

  Node* count_val = pop();
  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
  Node* obj = new_array(makecon(array_klass), count_val, 1);
  // Push resultant oop onto stack
  push(obj);
}

// Expand simple expressions like new int[3][5] and new Object[2][nonConLen].
// Also handle the degenerate 1-dimensional case of anewarray.
Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs) {
  Node* length = lengths[0];
  assert(length != NULL, "");
  Node* array = new_array(makecon(TypeKlassPtr::make(array_klass)), length, nargs);
  if (ndimensions > 1) {
    jint length_con = find_int_con(length, -1);
    guarantee(length_con >= 0, "non-constant multianewarray");
    ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
    const TypePtr* adr_type = TypeAryPtr::OOPS;
    const TypeOopPtr* elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
    const intptr_t header = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
    for (jint i = 0; i < length_con; i++) {
      Node*    elem   = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
      intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
      Node*    eaddr  = basic_plus_adr(array, offset);
      store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT, MemNode::unordered);
    }
  }
  return array;
}

void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note:  Array classes are always initialized; no is_initialized check.

  kill_dead_locals();

  // get the lengths from the stack (first dimension is on top)
  Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
  length[ndimensions] = NULL;  // terminating null for make_runtime_call
  int j;
  for (j = ndimensions-1; j >= 0 ; j--) length[j] = pop();

  // The original expression was of this form: new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
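  // Illustrative example: for 'new T[2][3][n]' the loop below computes
  // expand_count = 1 + 2 + 2*3 = 9 one-dimensional allocations (the final
  // dimension n need not be constant), so the expansion is taken as long
  // as 9 <= expand_limit.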
  const int expand_limit = MIN2((int)MultiArrayExpandLimit, 100);
  int expand_count = 1;        // count of allocations in the expansion
  int expand_fanout = 1;       // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    int dim_con = find_int_con(length[j], -1);
    expand_fanout *= dim_con;
    expand_count  += expand_fanout; // count the level-J sub-arrays
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }
  }

  // Can use multianewarray instead of [a]newarray if only one dimension,
  // or if all non-final dimensions are small constants.
  if (ndimensions == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
    Node* obj = NULL;
    // Set the original stack and the reexecute bit for the interpreter
    // to reexecute the multianewarray bytecode if deoptimization happens.
    // Do it unconditionally even for one dimension multianewarray.
    // Note: the reexecute bit will be set in GraphKit::add_safepoint_edges()
    // when AllocateArray node for newarray is created.
    { PreserveReexecuteState preexecs(this);
      inc_sp(ndimensions);
      // Pass 0 as nargs since uncommon trap code does not need to restore stack.
      obj = expand_multianewarray(array_klass, &length[0], ndimensions, 0);
    } // original reexecute and sp are set back here
    push(obj);
    return;
  }

  address fun = NULL;
  switch (ndimensions) {
  case 1: ShouldNotReachHere(); break;
  case 2: fun = OptoRuntime::multianewarray2_Java(); break;
  case 3: fun = OptoRuntime::multianewarray3_Java(); break;
  case 4: fun = OptoRuntime::multianewarray4_Java(); break;
  case 5: fun = OptoRuntime::multianewarray5_Java(); break;
  };
  Node* c = NULL;

  if (fun != NULL) {
    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarray_Type(ndimensions),
                          fun, NULL, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass)),
                          length[0], length[1], length[2],
                          (ndimensions > 2) ? length[3] : NULL,
                          (ndimensions > 3) ? length[4] : NULL);
  } else {
    // Create a java array for dimension sizes
    Node* dims = NULL;
    { PreserveReexecuteState preexecs(this);
      inc_sp(ndimensions);
      Node* dims_array_klass = makecon(TypeKlassPtr::make(ciArrayKlass::make(ciType::make(T_INT))));
      dims = new_array(dims_array_klass, intcon(ndimensions), 0);

      // Fill it in with values
      for (j = 0; j < ndimensions; j++) {
        Node *dims_elem = array_element_address(dims, intcon(j), T_INT);
        store_to_memory(control(), dims_elem, length[j], T_INT, TypeAryPtr::INTS, MemNode::unordered);
      }
    }

    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarrayN_Type(),
                          OptoRuntime::multianewarrayN_Java(), NULL, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass)),
                          dims);
  }
  make_slow_call_ex(c, env()->Throwable_klass(), false);

  Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms));

  const Type* type = TypeOopPtr::make_from_klass_raw(array_klass);

  // Improve the type:  We know it's not null, exact, and of a given length.
  type = type->is_ptr()->cast_to_ptr_type(TypePtr::NotNull);
  type = type->is_aryptr()->cast_to_exactness(true);

  const TypeInt* ltype = _gvn.find_int_type(length[0]);
  if (ltype != NULL)
    type = type->is_aryptr()->cast_to_size(ltype);

  // We cannot sharpen the nested sub-arrays, since the top level is mutable.
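  // E.g. for 'new A[n][m]' the top-level type is sharpened to an exact,
  // non-null A[][] (with length n when n is a known constant), while the
  // nested A[] element type is left as-is.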

  Node* cast = _gvn.transform( new CheckCastPPNode(control(), res, type) );
  push(cast);

  // Possible improvements:
  // - Make a fast path for small multi-arrays.  (W/ implicit init. loops.)
  // - Issue CastII against length[*] values, to TypeInt::POS.
}