/*
 * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_parse3.cpp.incl"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================
bool Parse::static_field_ok_in_clinit(ciField *field, ciMethod *method) {
  // Could be the field_holder's <clinit> method, or <clinit> for a subklass.
  // Better to check now than to Deoptimize as soon as we execute.
  assert(field->is_static(), "Only check if field is static");
  // is_being_initialized() is too generous.  It allows access to statics
  // by threads that are not running the <clinit> before the <clinit> finishes.
  // return field->holder()->is_being_initialized();

  // The following restriction is correct but conservative.
  // It is also desirable to allow compilation of methods called from <clinit>,
  // but this generated code will need to be made safe for execution by
  // other threads, or the transition from interpreted to compiled code would
  // need to be guarded.
  ciInstanceKlass *field_holder = field->holder();

  bool access_OK = false;
  if (method->holder()->is_subclass_of(field_holder)) {
    if (method->is_static()) {
      if (method->name() == ciSymbol::class_initializer_name()) {
        // OK to access static fields inside initializer
        access_OK = true;
      }
    } else {
      if (method->name() == ciSymbol::object_initializer_name()) {
        // It's also OK to access static fields inside a constructor,
        // because any thread calling the constructor must first have
        // synchronized on the class by executing a '_new' bytecode.
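        // (The '_new' bytecode triggers class initialization if needed,
        // so by the time <init> runs, the class is either initialized or
        // is being initialized by this same thread.)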
        access_OK = true;
      }
    }
  }

  return access_OK;
}


void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "getfield: typeflow responsibility");

  ciInstanceKlass* field_holder = field->holder();

  if (is_field == field->is_static()) {
    // Interpreter will throw java_lang_IncompatibleClassChangeError.
    // Check this before allowing <clinit> methods to access static fields.
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  if (!is_field && !field_holder->is_initialized()) {
    if (!static_field_ok_in_clinit(field, method())) {
      uncommon_trap(Deoptimization::Reason_uninitialized,
                    Deoptimization::Action_reinterpret,
                    NULL, "!static_field_ok_in_clinit");
      return;
    }
  }

  assert(field->will_link(method()->holder(), bc()), "getfield: typeflow responsibility");

  // Note: We do not check for an unloaded field type here any more.

  // Generate code for the object pointer.
  Node* obj;
  if (is_field) {
    int obj_depth = is_get ? 0 : field->type()->size();
    obj = do_null_check(peek(obj_depth), T_OBJECT);
    // Compile-time detection of a null exception?
    if (stopped())  return;

    const TypeInstPtr *tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
    assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");

    if (is_get) {
      --_sp;  // pop receiver before getting
      do_get_xxx(tjp, obj, field, is_field);
    } else {
      do_put_xxx(tjp, obj, field, is_field);
      --_sp;  // pop receiver after putting
    }
  } else {
    const TypeKlassPtr* tkp = TypeKlassPtr::make(field_holder);
    obj = _gvn.makecon(tkp);
    if (is_get) {
      do_get_xxx(tkp, obj, field, is_field);
    } else {
      do_put_xxx(tkp, obj, field, is_field);
    }
  }
}


void Parse::do_get_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field) {
  // Does this field have a constant value?  If so, just push the value.
  if (field->is_constant() && push_constant(field->constant_value()))  return;

  ciType* field_klass = field->type();
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node *adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();

  // Build the resultant type of the load.
  const Type *type;

  bool must_assert_null = false;

  if (bt == T_OBJECT) {
    if (!field->type()->is_loaded()) {
      type = TypeInstPtr::BOTTOM;
      must_assert_null = true;
    } else if (field->is_constant()) {
      // This can happen if the constant oop is non-perm.
      ciObject* con = field->constant_value().as_object();
      // Do not "join" in the previous type; it doesn't add value,
      // and may yield a vacuous result if the field is of interface type.
      type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
      assert(type != NULL, "field singleton type must be consistent");
    } else {
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    }
  } else {
    type = Type::get_const_basic_type(bt);
  }
  // Build the load.
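  // The control input is NULL: the load is ordered by its memory edges
  // rather than by explicit control flow.  For a volatile field, the
  // MemBarAcquire inserted at the end of this method keeps later memory
  // ops from floating above the load.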
  Node* ld = make_load(NULL, adr, type, bt, adr_type, is_vol);

  // Adjust Java stack.
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here.  It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever.  Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations.  (Or, we might load a class
    // which should not be loaded.)  If we ever see a non-null
    // value, we will then trap and recompile.  (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
#endif
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field->type()));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    do_null_assert(peek(), T_OBJECT);
    set_bci(iter().cur_bci()); // put it back
  }

  // If reference is volatile, prevent following memory ops from
  // floating up past the volatile read.  Also prevents commoning
  // another volatile read.
  if (field->is_volatile()) {
    // Memory barrier includes bogus read of value to force load BEFORE membar
    insert_mem_bar(Op_MemBarAcquire, ld);
  }
}

void Parse::do_put_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field) {
  bool is_vol = field->is_volatile();
  // If reference is volatile, prevent following memory ops from
  // floating down past the volatile write.  Also prevents commoning
  // another volatile read.
  if (is_vol)  insert_mem_bar(Op_MemBarRelease);

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();
  // Value to be stored
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();
  // Round doubles before storing
  if (bt == T_DOUBLE)  val = dstore_rounding(val);

  // Store the value.
  Node* store;
  if (bt == T_OBJECT) {
    const TypeOopPtr* field_type;
    if (!field->type()->is_loaded()) {
      field_type = TypeInstPtr::BOTTOM;
    } else {
      field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
    }
    store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt);
  } else {
    store = store_to_memory(control(), adr, val, bt, adr_type, is_vol);
  }

  // If reference is volatile, prevent following volatile ops from
  // floating up before the volatile write.
  if (is_vol) {
    // First place the specific membar for THIS volatile index.  This first
    // membar is dependent on the store, keeping any other membars generated
    // below from floating up past the store.
    int adr_idx = C->get_alias_index(adr_type);
    insert_mem_bar_volatile(Op_MemBarVolatile, adr_idx);

    // Now place a membar for AliasIdxBot for the unknown yet-to-be-parsed
    // volatile alias indices.  Skip this if the membar is redundant.
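    // (A membar on AliasIdxBot orders all memory slices at once, so if
    // adr_idx was already AliasIdxBot the membar placed above has done
    // this job and a second one would be redundant.)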
    if (adr_idx != Compile::AliasIdxBot) {
      insert_mem_bar_volatile(Op_MemBarVolatile, Compile::AliasIdxBot);
    }

    // Finally, place alias-index-specific membars for each volatile index
    // that isn't the adr_idx membar.  Typically there's only 1 or 2.
    for (int i = Compile::AliasIdxRaw; i < C->num_alias_types(); i++) {
      if (i != adr_idx && C->alias_type(i)->is_volatile()) {
        insert_mem_bar_volatile(Op_MemBarVolatile, i);
      }
    }
  }

  // If the field is final, the rules of Java say we are in <init> or <clinit>.
  // Note the presence of writes to final non-static fields, so that we
  // can insert a memory barrier later on to keep the writes from floating
  // out of the constructor.
  if (is_field && field->is_final()) {
    set_wrote_final(true);
  }
}


bool Parse::push_constant(ciConstant constant, bool require_constant) {
  switch (constant.basic_type()) {
  case T_BOOLEAN:  push( intcon(constant.as_boolean()) ); break;
  case T_INT:      push( intcon(constant.as_int())     ); break;
  case T_CHAR:     push( intcon(constant.as_char())    ); break;
  case T_BYTE:     push( intcon(constant.as_byte())    ); break;
  case T_SHORT:    push( intcon(constant.as_short())   ); break;
  case T_FLOAT:    push( makecon(TypeF::make(constant.as_float())) ); break;
  case T_DOUBLE:   push_pair( makecon(TypeD::make(constant.as_double())) ); break;
  case T_LONG:     push_pair( longcon(constant.as_long()) ); break;
  case T_ARRAY:
  case T_OBJECT: {
    // cases:
    //   can_be_constant    = (oop not scavengable || ScavengeRootsInCode != 0)
    //   should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
    // An oop is not scavengable if it is in the perm gen.
    ciObject* oop_constant = constant.as_object();
    if (oop_constant->is_null_object()) {
      push( zerocon(T_OBJECT) );
      break;
    } else if (require_constant || oop_constant->should_be_constant()) {
      push( makecon(TypeOopPtr::make_from_constant(oop_constant, require_constant)) );
      break;
    } else {
      // we cannot inline the oop, but we can use it later to narrow a type
      return false;
    }
  }
  case T_ILLEGAL: {
    // Invalid ciConstant returned due to OutOfMemoryError in the CI
    assert(C->env()->failing(), "otherwise should not see this");
    // These always occur because of object types; we are going to
    // bail out anyway, so make the stack depths match up
    push( zerocon(T_OBJECT) );
    return false;
  }
  default:
    ShouldNotReachHere();
    return false;
  }

  // success
  return true;
}



//=============================================================================
void Parse::do_anewarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Uncommon trap when the class that the array contains is not loaded;
  // we need the loaded class for the rest of the graph, but we must not
  // initialize the container class (see Java spec)!
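  // (For example, 'new Foo[n]' needs Foo's class to be loaded so the
  // array type can be computed, but anewarray itself must not run
  // Foo's static initializer.)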
  assert(will_link, "anewarray: typeflow responsibility");

  ciObjArrayKlass* array_klass = ciObjArrayKlass::make(klass);
  // Check that array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate uncommon_trap for unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  }

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}


void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();

  Node* count_val = pop();
  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
  Node* obj = new_array(makecon(array_klass), count_val, 1);
  // Push resultant oop onto stack
  push(obj);
}

// Expand simple expressions like new int[3][5] and new Object[2][nonConLen].
// Also handle the degenerate 1-dimensional case of anewarray.
Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs) {
  Node* length = lengths[0];
  assert(length != NULL, "");
  Node* array = new_array(makecon(TypeKlassPtr::make(array_klass)), length, nargs);
  if (ndimensions > 1) {
    jint length_con = find_int_con(length, -1);
    guarantee(length_con >= 0, "non-constant multianewarray");
    ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
    const TypePtr* adr_type = TypeAryPtr::OOPS;
    const TypeOopPtr* elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
    const intptr_t header = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
    for (jint i = 0; i < length_con; i++) {
      Node* elem = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
      intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
      Node* eaddr = basic_plus_adr(array, offset);
      store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT);
    }
  }
  return array;
}

void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note: Array classes are always initialized; no is_initialized check.

  enum { MAX_DIMENSION = 5 };
  if (ndimensions > MAX_DIMENSION || ndimensions <= 0) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  kill_dead_locals();

  // get the lengths from the stack (first dimension is on top)
  Node* length[MAX_DIMENSION+1];
  length[ndimensions] = NULL;  // terminating null for make_runtime_call
  int j;
  for (j = ndimensions-1; j >= 0; j--)  length[j] = pop();

  // The original expression was of this form: new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
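  // (For example, 'new T[2][3][n]' expands into 1 + 2 + 2*3 = 9 calls to
  // the 1-d creator: the outer array, two middle arrays, and six
  // innermost arrays of length n.)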
  const jint expand_limit = MIN2((juint)MultiArrayExpandLimit, (juint)100);
  jint expand_count = 1;        // count of allocations in the expansion
  jint expand_fanout = 1;       // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    jint dim_con = find_int_con(length[j], -1);
    expand_fanout *= dim_con;
    expand_count  += expand_fanout; // count the level-J sub-arrays
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }
  }

  // Can use [a]newarray instead of multianewarray if only one dimension,
  // or if all non-final dimensions are small constants.
  if (expand_count == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
    Node* obj = expand_multianewarray(array_klass, &length[0], ndimensions, ndimensions);
    push(obj);
    return;
  }

  address fun = NULL;
  switch (ndimensions) {
  //case 1: Actually, there is no case 1.  It's handled by new_array.
  case 2: fun = OptoRuntime::multianewarray2_Java(); break;
  case 3: fun = OptoRuntime::multianewarray3_Java(); break;
  case 4: fun = OptoRuntime::multianewarray4_Java(); break;
  case 5: fun = OptoRuntime::multianewarray5_Java(); break;
  default: ShouldNotReachHere();
  };

  Node* c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                              OptoRuntime::multianewarray_Type(ndimensions),
                              fun, NULL, TypeRawPtr::BOTTOM,
                              makecon(TypeKlassPtr::make(array_klass)),
                              length[0], length[1], length[2],
                              length[3], length[4]);
  Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms));

  const Type* type = TypeOopPtr::make_from_klass_raw(array_klass);

  // Improve the type:  We know it's not null, exact, and of a given length.
  type = type->is_ptr()->cast_to_ptr_type(TypePtr::NotNull);
  type = type->is_aryptr()->cast_to_exactness(true);

  const TypeInt* ltype = _gvn.find_int_type(length[0]);
  if (ltype != NULL)
    type = type->is_aryptr()->cast_to_size(ltype);

  // We cannot sharpen the nested sub-arrays, since the top level is mutable.

  Node* cast = _gvn.transform( new (C, 2) CheckCastPPNode(control(), res, type) );
  push(cast);

  // Possible improvements:
  // - Make a fast path for small multi-arrays.  (W/ implicit init. loops.)
  // - Issue CastII against length[*] values, to TypeInt::POS.
}