/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/valueArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/memnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/valuetypenode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================
bool Parse::static_field_ok_in_clinit(ciField *field, ciMethod *method) {
  // Could be the field_holder's <clinit> method, or <clinit> for a subklass.
  // Better to check now than to Deoptimize as soon as we execute
  assert(field->is_static(), "Only check if field is static");
  // is_being_initialized() is too generous. It allows access to statics
  // by threads that are not running the <clinit> before the <clinit> finishes.
  // return field->holder()->is_being_initialized();

  // The following restriction is correct but conservative.
  // It is also desirable to allow compilation of methods called from <clinit>
  // but this generated code will need to be made safe for execution by
  // other threads, or the transition from interpreted to compiled code would
  // need to be guarded.
  ciInstanceKlass *field_holder = field->holder();

  bool access_OK = false;
  if (method->holder()->is_subclass_of(field_holder)) {
    if (method->is_static()) {
      if (method->name() == ciSymbol::class_initializer_name()) {
        // OK to access static fields inside initializer
        access_OK = true;
      }
    } else {
      if (method->name() == ciSymbol::object_initializer_name()) {
        // It's also OK to access static fields inside a constructor,
        // because any thread calling the constructor must first have
        // synchronized on the class by executing a '_new' bytecode.
        access_OK = true;
      }
    }
  }

  return access_OK;

}


void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "getfield: typeflow responsibility");

  ciInstanceKlass* field_holder = field->holder();

  if (is_field && field_holder->is_valuetype() && peek()->is_ValueType()) {
    assert(is_get, "value type field store not supported");
    ValueTypeNode* vt = pop()->as_ValueType();
    Node* value = vt->field_value_by_offset(field->offset());
    push_node(field->layout_type(), value);
    return;
  }

  if (is_field == field->is_static()) {
    // Interpreter will throw java_lang_IncompatibleClassChangeError
    // Check this before allowing <clinit> methods to access static fields
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  if (!is_field && !field_holder->is_initialized()) {
    if (!static_field_ok_in_clinit(field, method())) {
      uncommon_trap(Deoptimization::Reason_uninitialized,
                    Deoptimization::Action_reinterpret,
                    NULL, "!static_field_ok_in_clinit");
      return;
    }
  }

  // Deoptimize on putfield writes to call site target field.
  if (!is_get && field->is_call_site_target()) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_reinterpret,
                  NULL, "put to call site target field");
    return;
  }

  assert(field->will_link(method(), bc()), "getfield: typeflow responsibility");

  // Note: We do not check for an unloaded field type here any more.

  // Generate code for the object pointer.
  Node* obj;
  if (is_field) {
    int obj_depth = is_get ? 0 : field->type()->size();
    obj = null_check(peek(obj_depth));
    // Compile-time detect of null-exception?
    if (stopped())  return;

#ifdef ASSERT
    const TypeInstPtr *tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
    assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");
#endif

    if (is_get) {
      (void) pop();  // pop receiver before getting
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
      if (stopped()) {
        return;
      }
      (void) pop();  // pop receiver after putting
    }
  } else {
    const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
    obj = _gvn.makecon(tip);
    if (is_get) {
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
    }
  }
}

void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
  BasicType bt = field->layout_type();

  // Does this field have a constant value? If so, just push the value.
  if (field->is_constant() &&
      // Keep consistent with types found by ciTypeFlow: for an
      // unloaded field type, ciTypeFlow::StateVector::do_getstatic()
      // speculates the field is null. The code in the rest of this
      // method does the same. We must not bypass it and use a non
      // null constant here.
      (bt != T_OBJECT || field->type()->is_loaded())) {
    // final or stable field
    Node* con = make_constant_from_field(field, obj);
    if (con != NULL) {
      push_node(field->layout_type(), con);
      return;
    }
  }

  ciType* field_klass = field->type();
  bool is_vol = field->is_volatile();
  bool flattened = field->is_flattened();
  bool flattenable = field->is_flattenable();

  // Compute address and memory type.
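  // (Note: C->alias_type(field) is C2's alias category for this exact field;
  // its adr_type() selects the memory slice the access below is keyed on, so
  // operations on unrelated fields remain independent in the memory graph.)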
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node *adr = basic_plus_adr(obj, obj, offset);

  // Build the resultant type of the load
  const Type *type;

  bool must_assert_null = false;

  if (bt == T_OBJECT || bt == T_ARRAY || bt == T_VALUETYPE) {
    if (!field->type()->is_loaded()) {
      type = TypeInstPtr::BOTTOM;
      must_assert_null = true;
    } else if (field->is_static_constant()) {
      // This can happen if the constant oop is non-perm.
      ciObject* con = field->constant_value().as_object();
      // Do not "join" in the previous type; it doesn't add value,
      // and may yield a vacuous result if the field is of interface type.
      if (con->is_null_object()) {
        type = TypePtr::NULL_PTR;
      } else {
        type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
      }
      assert(type != NULL, "field singleton type must be consistent");
    } else {
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
      if (bt == T_VALUETYPE && field->is_static()) {
        // Check if static value type field is already initialized
        assert(!flattened, "static fields should not be flattened");
        ciInstance* mirror = field->holder()->java_mirror();
        ciObject* val = mirror->field_value(field).as_object();
        if (!val->is_null_object()) {
          type = type->join_speculative(TypePtr::NOTNULL);
        }
      }
    }
  } else {
    type = Type::get_const_basic_type(bt);
  }

  Node* ld = NULL;
  if (flattened) {
    // Load flattened value type
    ld = ValueTypeNode::make_from_flattened(this, field_klass->as_value_klass(), obj, obj, field->holder(), offset);
  } else {
    DecoratorSet decorators = IN_HEAP;
    decorators |= is_vol ? MO_SEQ_CST : MO_UNORDERED;
    ld = access_load_at(obj, adr, adr_type, type, bt, decorators);
    if (bt == T_VALUETYPE) {
      // Load a non-flattened value type from memory
      if (field_klass->as_value_klass()->is_scalarizable()) {
        ld = ValueTypeNode::make_from_oop(this, ld, field_klass->as_value_klass(), /* buffer_check */ false, /* null2default */ flattenable, iter().next_bci());
      } else if (gvn().type(ld)->maybe_null()) {
        ld = filter_null(ld, flattenable, field_klass->as_value_klass(), iter().next_bci());
      }
    }
  }

  // Adjust Java stack
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here. It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever. Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations. (Or, we might load a class
    // which should not be loaded.) If we ever see a non-null
    // value, we will then trap and recompile. (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
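    // (Instead of the commented-out trap below, we emit a null_assert,
    // placing any eventual trap at the next bytecode.)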
    // uncommon_trap(iter().get_field_signature_index());
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field->type()));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    null_assert(peek());
    set_bci(iter().cur_bci()); // put it back
  }
}

void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();
  // Value to be stored
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();

  DecoratorSet decorators = IN_HEAP;
  decorators |= is_vol ? MO_SEQ_CST : MO_UNORDERED;

  // Store the value.
  const Type* field_type;
  if (!field->type()->is_loaded()) {
    field_type = TypeInstPtr::BOTTOM;
  } else {
    if (bt == T_OBJECT || bt == T_ARRAY || bt == T_VALUETYPE) {
      field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
    } else {
      field_type = Type::BOTTOM;
    }
  }
  if (field->is_flattenable() && !val->is_ValueType() && gvn().type(val)->maybe_null()) {
    // We can see a null constant here
    assert(val->bottom_type()->remove_speculative() == TypePtr::NULL_PTR, "Anything other than null?");
    push(null());
    uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
    assert(stopped(), "dead path");
    return;
  } else if (field->is_flattened()) {
    if (!val->is_ValueType()) {
      assert(!gvn().type(val)->maybe_null(), "should never be null");
      val = ValueTypeNode::make_from_oop(this, val, field->type()->as_value_klass());
    }
    // Store flattened value type to a non-static field
    assert(bt == T_VALUETYPE, "flattening is only supported for value type fields");
    val->as_ValueType()->store_flattened(this, obj, obj, field->holder(), offset);
  } else {
    access_store_at(control(), obj, adr, adr_type, val, field_type, bt, decorators);
  }

  if (is_field) {
    // Remember we wrote a volatile field.
    // On a CPU that is not multiple-copy-atomic (e.g., ppc64), a barrier
    // should be issued in constructors which have such stores.
    // See do_exits() in parse1.cpp.
    if (is_vol) {
      set_wrote_volatile(true);
    }
    set_wrote_fields(true);

    // If the field is final, the rules of Java say we are in <init> or <clinit>.
    // Note the presence of writes to final non-static fields, so that we
    // can insert a memory barrier later on to keep the writes from floating
    // out of the constructor.
    // Any method can write a @Stable field; insert memory barriers after those also.
    if (field->is_final()) {
      set_wrote_final(true);
      if (AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) {
        // Preserve allocation ptr to create precedent edge to it in membar
        // generated on exit from constructor.
        // Can't bind stable with its allocation, only record allocation for final field.
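        // (The allocation recorded here is consulted by do_exits() in
        // parse1.cpp, which attaches it to the memory barrier emitted on
        // exit from the constructor.)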
        set_alloc_with_final(obj);
      }
    }
    if (field->is_stable()) {
      set_wrote_stable(true);
    }
  }
}

//=============================================================================

void Parse::do_newarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Uncommon-trap if the element class is not loaded: we need the loaded
  // class for the rest of the graph, but we must not initialize the
  // container class (see the Java spec)!
  assert(will_link, "newarray: typeflow responsibility");

  ciArrayKlass* array_klass = ciArrayKlass::make(klass);
  // Check that the array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate uncommon_trap for the unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  } else if (array_klass->element_klass() != NULL &&
             array_klass->element_klass()->is_valuetype() &&
             !array_klass->element_klass()->as_value_klass()->is_initialized()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  NULL);
    return;
  }

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}


void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();

  Node* count_val = pop();
  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
  Node* obj = new_array(makecon(array_klass), count_val, 1);
  // Push resultant oop onto stack
  push(obj);
}

// Expand simple expressions like new int[3][5] and new Object[2][nonConLen].
// Also handle the degenerate 1-dimensional case of anewarray.
Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node** lengths, int ndimensions, int nargs) {
  Node* length = lengths[0];
  assert(length != NULL, "");
  Node* array = new_array(makecon(TypeKlassPtr::make(array_klass)), length, nargs);
  if (ndimensions > 1) {
    jint length_con = find_int_con(length, -1);
    guarantee(length_con >= 0, "non-constant multianewarray");
    ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
    const TypePtr* adr_type = TypeAryPtr::OOPS;
    const TypeOopPtr* elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
    const intptr_t header = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
    for (jint i = 0; i < length_con; i++) {
      Node* elem = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
      intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
      Node* eaddr = basic_plus_adr(array, offset);
      access_store_at(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT, IN_HEAP | IS_ARRAY);
    }
  }
  return array;
}

void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note: Array classes are always initialized; no is_initialized check.
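
  // (kill_dead_locals() below clears locals that are dead at this bci from
  // the JVM state, so the debug info recorded at the allocation's safepoint
  // does not keep dead oops alive.)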
  kill_dead_locals();

  // get the lengths from the stack (first dimension is on top)
  Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
  length[ndimensions] = NULL;  // terminating null for make_runtime_call
  int j;
  for (j = ndimensions-1; j >= 0; j--) length[j] = pop();

  // The original expression was of this form: new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
  // For example, for new T[3][n] the loop below computes
  // expand_count = 1 + 3 = 4: one top-level array plus three sub-arrays.
  const int expand_limit = MIN2((int)MultiArrayExpandLimit, 100);
  int expand_count = 1;        // count of allocations in the expansion
  int expand_fanout = 1;       // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    int dim_con = find_int_con(length[j], -1);
    expand_fanout *= dim_con;
    expand_count  += expand_fanout; // count the level-J sub-arrays
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }
  }

  // Can expand multianewarray into inline [a]newarray allocations if there
  // is only one dimension, or if all non-final dimensions are small constants.
  if (ndimensions == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
    Node* obj = NULL;
    // Set the original stack and the reexecute bit for the interpreter
    // to reexecute the multianewarray bytecode if deoptimization happens.
    // Do it unconditionally even for one dimension multianewarray.
    // Note: the reexecute bit will be set in GraphKit::add_safepoint_edges()
    // when AllocateArray node for newarray is created.
    { PreserveReexecuteState preexecs(this);
      inc_sp(ndimensions);
      // Pass 0 as nargs since uncommon trap code does not need to restore stack.
      obj = expand_multianewarray(array_klass, &length[0], ndimensions, 0);
    } //original reexecute and sp are set back here
    push(obj);
    return;
  }

  address fun = NULL;
  switch (ndimensions) {
  case 1: ShouldNotReachHere(); break;
  case 2: fun = OptoRuntime::multianewarray2_Java(); break;
  case 3: fun = OptoRuntime::multianewarray3_Java(); break;
  case 4: fun = OptoRuntime::multianewarray4_Java(); break;
  case 5: fun = OptoRuntime::multianewarray5_Java(); break;
  };
  Node* c = NULL;

  if (fun != NULL) {
    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarray_Type(ndimensions),
                          fun, NULL, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass)),
                          length[0], length[1], length[2],
                          (ndimensions > 2) ? length[3] : NULL,
                          (ndimensions > 3) ? length[4] : NULL);
  } else {
    // Create a Java array holding the dimension sizes
    Node* dims = NULL;
    { PreserveReexecuteState preexecs(this);
      inc_sp(ndimensions);
      Node* dims_array_klass = makecon(TypeKlassPtr::make(ciArrayKlass::make(ciType::make(T_INT))));
      dims = new_array(dims_array_klass, intcon(ndimensions), 0);

      // Fill it in with the dimension values
      for (j = 0; j < ndimensions; j++) {
        Node *dims_elem = array_element_address(dims, intcon(j), T_INT);
        store_to_memory(control(), dims_elem, length[j], T_INT, TypeAryPtr::INTS, MemNode::unordered);
      }
    }

    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarrayN_Type(),
                          OptoRuntime::multianewarrayN_Java(), NULL, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass)),
                          dims);
  }
  make_slow_call_ex(c, env()->Throwable_klass(), false);

  Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms));

  const Type* type = TypeOopPtr::make_from_klass_raw(array_klass);

  // Improve the type: We know it's not null, exact, and of a given length.
  type = type->is_ptr()->cast_to_ptr_type(TypePtr::NotNull);
  type = type->is_aryptr()->cast_to_exactness(true);

  const TypeInt* ltype = _gvn.find_int_type(length[0]);
  if (ltype != NULL)
    type = type->is_aryptr()->cast_to_size(ltype);

  // We cannot sharpen the nested sub-arrays, since the top level is mutable.

  Node* cast = _gvn.transform(new CheckCastPPNode(control(), res, type));
  push(cast);

  // Possible improvements:
  // - Make a fast path for small multi-arrays.  (W/ implicit init. loops.)
  // - Issue CastII against length[*] values, to TypeInt::POS.
}