/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_c1_CodeStubs_sparc.cpp.incl"

#define __ ce->masm()->
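
// Note: every SPARC call/branch below has an architectural delay slot.
// Stubs either hoist a useful move into the slot (delayed()->mov(...),
// delayed()->mov_or_nop(...)) or fill it with delayed()->nop().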

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  _info = new CodeEmitInfo(info);
}


void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_index->is_register()) {
    __ mov(_index->as_register(), G4);
  } else {
    __ set(_index->as_jint(), G4);
  }
  if (_throw_index_out_of_bounds_exception) {
    __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
  }
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ set(_bci, G4);
  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_method->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}


void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id),
          relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of SimpleExceptionStub
// Note: %g1 and %g3 are already in use
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);

  if (_obj->is_valid()) {
    __ delayed()->mov(_obj->as_register(), G4);  // _obj contains the optional argument to the stub
  } else {
    __ delayed()->mov(G0, G4);
  }
  ce->add_call_info_here(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of ArrayStoreExceptionStub

ArrayStoreExceptionStub::ArrayStoreExceptionStub(CodeEmitInfo* info):
  _info(info) {
}


void ArrayStoreExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_array_store_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  __ mov(_length->as_register(), G4);
  __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  __ mov(_length->as_register(), G4);
  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ mov(_obj_reg->as_register(), G4);
  if (ce->compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::monitorenter_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::monitorenter_nofpu_id), relocInfo::runtime_call_type);
  }
  __ delayed()->mov_or_nop(_lock_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  if (ce->compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::monitorexit_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::monitorexit_nofpu_id), relocInfo::runtime_call_type);
  }

  __ delayed()->mov_or_nop(_lock_reg->as_register(), G4);
  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}


// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction
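//
// A sketch of the stub layout that emit_code() below produces (illustrative
// only; the code below is authoritative).  The patch site in the nmethod is
// overwritten with a jump to call_patch:
//
//   being_initialized_entry:
//     <copy of the to-be-patched instructions>   (_bytes_to_copy bytes)
//     [load_klass_id only: being-initialized check; the initializing
//      thread branches back to _patch_site_continuation, everyone else
//      falls through to call_patch]
//   patch record (4 bytes, one instruction slot):
//     0x00, being_initialized_entry_offset, bytes_to_skip, _bytes_to_copy
//   call_patch:
//     call <Runtime1 patching entry>   ! delay slot: nop
//     br   _patch_site_entry           ! re-execute the patched instruction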

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* ) {
  // patch sites on SPARC are always properly aligned.
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // copy original code here
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");

  Label call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit(NULL, oop_Relocation::spec(_oop_index));
    __ patchable_set(addrlit, _obj);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ a_byte(a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_klass_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    // static field accesses have special semantics while the class
    // initializer is being run so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
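    // The test below loads the klass's init_thread field and compares it
    // against the current thread (G2_thread); only the initializing thread
    // may run the patched code before patching is complete.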
    assert(_obj != noreg, "must be a valid register");
    assert(_oop_index >= 0, "must have oop index");
    __ ld_ptr(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc), G3);
    __ cmp(G2_thread, G3);
    __ br(Assembler::notEqual, false, Assembler::pn, call_patch);
    __ delayed()->nop();

    // load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ br(Assembler::always, false, Assembler::pt, _patch_site_continuation);
    __ delayed()->nop();

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but it has to be
  // aligned as an instruction so emit 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record.  We need to emit a full word, so emit an extra empty byte
  __ a_byte(0);
  __ a_byte(being_initialized_entry_offset);
  __ a_byte(bytes_to_skip);
  __ a_byte(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);   break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(target, relocInfo::runtime_call_type);
  __ delayed()->nop();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ br(Assembler::always, false, Assembler::pt, _patch_site_entry);
  __ delayed()->nop();
  if (_id == load_klass_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, relocInfo::oop_type, relocInfo::none);

    pc = (address)(_pc_start + NativeMovConstReg::add_offset);
    RelocIterator iter2(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter2, (address) pc, relocInfo::oop_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(SharedRuntime::deopt_blob()->unpack_with_reexecution());
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  __ mov(src()->as_register(),     O0);
  __ mov(src_pos()->as_register(), O1);
  __ mov(dst()->as_register(),     O2);
  __ mov(dst_pos()->as_register(), O3);
  __ mov(length()->as_register(),  O4);

  ce->emit_static_call_stub();

  __ call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());

#ifndef PRODUCT
  // bump the slow-case counter in non-product builds
  __ set((intptr_t)&Runtime1::_arraycopy_slowcase_cnt, O0);
  __ ld(O0, 0, O1);
  __ inc(O1);
  __ st(O1, 0, O0);
#endif

  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();
}


///////////////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC

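// G1 write-barrier slow paths.  Roughly (details live in Runtime1 and the
// G1 barrier set): the pre-barrier stub logs the previous field value for
// SATB marking; the post-barrier stub dirties the card for the updated
// location.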
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  assert(pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = pre_val()->as_register();

  ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false);
  __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
                    pre_val_reg, _continuation);
  __ delayed()->nop();

  __ call(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id));
  __ delayed()->mov(pre_val_reg, G4);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();
}

jbyte* G1PostBarrierStub::_byte_map_base = NULL;

jbyte* G1PostBarrierStub::byte_map_base_slow() {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->is_a(BarrierSet::G1SATBCTLogging),
         "Must be a G1 SATB barrier set if we're using this.");
  return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register addr_reg = addr()->as_pointer_register();
  Register new_val_reg = new_val()->as_register();
  __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
                    new_val_reg, _continuation);
  __ delayed()->nop();

  __ call(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id));
  __ delayed()->mov(addr_reg, G4);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();
}

#endif // SERIALGC
///////////////////////////////////////////////////////////////////////////////////

#undef __