/*
 * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_nativeInst_sparc.cpp.incl"


bool NativeInstruction::is_dtrace_trap() {
  return !is_nop();
}
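// Patch the constant materialized by a MacroAssembler::patchable_sethi
// sequence.  The exact instruction mix is owned by patchable_sethi; the
// seven-word icache flush below is sized to cover its worst-case expansion
// (an assumption documented here, not asserted by the code).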
void NativeInstruction::set_data64_sethi(address instaddr, intptr_t x) {
  ResourceMark rm;
  CodeBuffer buf(instaddr, 10 * BytesPerInstWord );
  MacroAssembler* _masm = new MacroAssembler(&buf);
  Register destreg;

  destreg = inv_rd(*(unsigned int *)instaddr);
  // Generate the new sequence
  _masm->patchable_sethi(x, destreg);
  ICache::invalidate_range(instaddr, 7 * BytesPerInstWord);
}

void NativeInstruction::verify() {
  // make sure code pattern is actually an instruction address
  address addr = addr_at(0);
  if (addr == 0 || ((intptr_t)addr & 3) != 0) {
    fatal("not an instruction address");
  }
}

void NativeInstruction::print() {
  tty->print_cr(INTPTR_FORMAT ": 0x%x", addr_at(0), long_at(0));
}

void NativeInstruction::set_long_at(int offset, int i) {
  address addr = addr_at(offset);
  *(int*)addr = i;
  ICache::invalidate_word(addr);
}

void NativeInstruction::set_jlong_at(int offset, jlong i) {
  address addr = addr_at(offset);
  *(jlong*)addr = i;
  // Don't need to invalidate 2 words here, because
  // the flush instruction operates on doublewords.
  ICache::invalidate_word(addr);
}

void NativeInstruction::set_addr_at(int offset, address x) {
  address addr = addr_at(offset);
  assert( ((intptr_t)addr & (wordSize-1)) == 0, "set_addr_at bad address alignment");
  *(uintptr_t*)addr = (uintptr_t)x;
  // Don't need to invalidate 2 words here in the 64-bit case,
  // because the flush instruction operates on doublewords.
  ICache::invalidate_word(addr);
  // The Intel code has this assertion for NativeCall::set_destination,
  // NativeMovConstReg::set_data, NativeMovRegMem::set_offset,
  // NativeJump::set_jump_destination, and NativePushImm32::set_data
  //assert (Patching_lock->owned_by_self(), "must hold lock to patch instruction")
}

bool NativeInstruction::is_zero_test(Register &reg) {
  int x = long_at(0);
  Assembler::op3s temp = (Assembler::op3s) (Assembler::sub_op3 | Assembler::cc_bit_op3);
  if (is_op3(x, temp, Assembler::arith_op) &&
      inv_immed(x) && inv_rd(x) == G0) {
    if (inv_rs1(x) == G0) {
      reg = inv_rs2(x);
      return true;
    } else if (inv_rs2(x) == G0) {
      reg = inv_rs1(x);
      return true;
    }
  }
  return false;
}

bool NativeInstruction::is_load_store_with_small_offset(Register reg) {
  int x = long_at(0);
  if (is_op(x, Assembler::ldst_op) &&
      inv_rs1(x) == reg && inv_immed(x)) {
    return true;
  }
  return false;
}

void NativeCall::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a call instruction
  if (!is_op(long_at(0), Assembler::call_op)) {
    fatal("not a call");
  }
}

void NativeCall::print() {
  tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
}


// MT-safe patching of a call instruction (and following word).
// First patches the second word, and then atomically replaces
// the first word with the first new instruction word.
// Other processors might briefly see the old first word
// followed by the new second word.  This is OK if the old
// second word is harmless, and the new second word may be
// harmlessly executed in the delay slot of the call.
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert (instr_addr != NULL, "illegal address for code patching");
  NativeCall* n_call = nativeCall_at (instr_addr); // checking that it is a call
  assert(NativeCall::instruction_size == 8, "wrong instruction size; must be 8");
  int i0 = ((int*)code_buffer)[0];
  int i1 = ((int*)code_buffer)[1];
  int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
         "must not interfere with original call");
  // The set_long_at calls do the ICache::invalidate, so we just need to do them in reverse order
  n_call->set_long_at(1*BytesPerInstWord, i1);
  n_call->set_long_at(0*BytesPerInstWord, i0);
  // NOTE:  It is possible that another thread T will execute
  // only the second patched word.
  // In other words, since the original instruction is this
  //    call patching_stub; nop                    (NativeCall)
  // and the new sequence from the buffer is this:
  //    sethi %hi(K), %r; add %r, %lo(K), %r       (NativeMovConstReg)
  // what T will execute is this:
  //    call patching_stub; add %r, %lo(K), %r
  // thereby putting garbage into %r before calling the patching stub.
  // This is OK, because the patching stub ignores the value of %r.

  // Make sure the first-patched instruction, which may co-exist
  // briefly with the call, will do something harmless.
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
         "must not interfere with original call");
}
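// For reference (standard SPARC encoding, not asserted by the code here): a
// call instruction is op=01 in the top two bits plus a 30-bit word
// displacement,
//    word = 0x40000000 | (((dest - pc) >> 2) & 0x3fffffff)
// so retargeting a call is a single-word, atomic update that can reach any
// address in a 32-bit address space.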
// Similar to replace_mt_safe, but just changes the destination.  The
// important thing is that free-running threads are able to execute this
// call instruction at all times.  Thus, the displacement field must be
// instruction-word-aligned.  This is always true on SPARC.
//
// Used in the runtime linkage of calls; see class CompiledIC.
void NativeCall::set_destination_mt_safe(address dest) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  // set_destination uses set_long_at which does the ICache::invalidate
  set_destination(dest);
}

// Code for unit testing implementation of NativeCall class
void NativeCall::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeCall  *nc;
  uint idx;
  int offsets[] = {
    0x0,
    0xfffffff0,
    0x7ffffff0,
    0x80000000,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  a->call( a->pc(), relocInfo::none );
  a->delayed()->nop();
  nc = nativeCall_at( cb.code_begin() );
  nc->print();

  nc = nativeCall_overwriting_at( nc->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nc->set_destination( cb.code_begin() + offsets[idx] );
    assert(nc->destination() == (cb.code_begin() + offsets[idx]), "check unit test");
    nc->print();
  }

  nc = nativeCall_before( cb.code_begin() + 8 );
  nc->print();

  VM_Version::revert();
#endif
}
// End code for unit testing implementation of NativeCall class

//-------------------------------------------------------------------

#ifdef _LP64

void NativeFarCall::set_destination(address dest) {
  // Address materialized in the instruction stream, so nothing to do.
  return;
#if 0 // What we'd do if we really did want to change the destination
  if (destination() == dest) {
    return;
  }
  ResourceMark rm;
  CodeBuffer buf(addr_at(0), instruction_size + 1);
  MacroAssembler* _masm = new MacroAssembler(&buf);
  // Generate the new sequence
  AddressLiteral(dest);
  _masm->jumpl_to(dest, O7, O7);
  ICache::invalidate_range(addr_at(0), instruction_size );
#endif
}

void NativeFarCall::verify() {
  // make sure code pattern is actually a jumpl_to instruction
  assert((int)instruction_size == (int)NativeJump::instruction_size, "same as jump_to");
  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
  nativeJump_at(addr_at(0))->verify();
}

bool NativeFarCall::is_call_at(address instr) {
  return nativeInstruction_at(instr)->is_sethi();
}
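// Note: the far-call sequence has the same layout as jump_to (a sethi that
// materializes the destination, then a jmpl), which is why verify() delegates
// to nativeJump_at() and is_call_at() keys on the leading sethi alone.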
void NativeFarCall::print() {
  tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
}

bool NativeFarCall::destination_is_compiled_verified_entry_point() {
  nmethod* callee = CodeCache::find_nmethod(destination());
  if (callee == NULL) {
    return false;
  } else {
    return destination() == callee->verified_entry_point();
  }
}

// MT-safe patching of a far call.
void NativeFarCall::replace_mt_safe(address instr_addr, address code_buffer) {
  Unimplemented();
}

// Code for unit testing implementation of NativeFarCall class
void NativeFarCall::test() {
  Unimplemented();
}
// End code for unit testing implementation of NativeFarCall class

#endif // _LP64

//-------------------------------------------------------------------


void NativeMovConstReg::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "set_oop" synthetic instruction
  // see MacroAssembler::set_oop()
  int i0 = long_at(sethi_offset);
  int i1 = long_at(add_offset);

  // verify the pattern "sethi %hi22(imm), reg ; add reg, %lo10(imm), reg"
  Register rd = inv_rd(i0);
#ifndef _LP64
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
        is_op3(i1, Assembler::add_op3, Assembler::arith_op) &&
        inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
        rd == inv_rs1(i1) && rd == inv_rd(i1))) {
    fatal("not a set_oop");
  }
#else
  if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
    fatal("not a set_oop");
  }
#endif
}


void NativeMovConstReg::print() {
  tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
}


#ifdef _LP64
intptr_t NativeMovConstReg::data() const {
  return data64(addr_at(sethi_offset), long_at(add_offset));
}
#else
intptr_t NativeMovConstReg::data() const {
  return data32(long_at(sethi_offset), long_at(add_offset));
}
#endif


void NativeMovConstReg::set_data(intptr_t x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi( long_at(sethi_offset), x));
#endif
  set_long_at(add_offset, set_data32_simm13( long_at(add_offset), x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  if (cb != NULL) {
    nmethod* nm = cb->as_nmethod_or_null();
    assert(nm, "must be");
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = (oop)x;
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
    }
  }
}
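// For illustration (32-bit case): set_data(0x12345678) into register %i3
// rewrites the immediates of the two-word pattern
//    sethi %hi(0x12345678), %i3        ! %hi = upper 22 bits
//    add   %i3, %lo(0x12345678), %i3   ! %lo = low 10 bits
// and data32() reassembles the constant from those two fields.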
// Code for unit testing implementation of NativeMovConstReg class
void NativeMovConstReg::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstReg* nm;
  uint idx;
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstReg_at( cb.code_begin() );
  nm->print();

  nm = nativeMovConstReg_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif
}
// End code for unit testing implementation of NativeMovConstReg class

//-------------------------------------------------------------------

void NativeMovConstRegPatching::verify() {
  NativeInstruction::verify();
  // Make sure code pattern is sethi/nop/add.
  int i0 = long_at(sethi_offset);
  int i1 = long_at(nop_offset);
  int i2 = long_at(add_offset);
  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");

  // Verify the pattern "sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg"
  // The casual reader should note that on Sparc a nop is a special case of sethi
  // in which the destination register is %g0.
  Register rd0 = inv_rd(i0);
  Register rd1 = inv_rd(i1);
  if (!(is_op2(i0, Assembler::sethi_op2) && rd0 != G0 &&
        is_op2(i1, Assembler::sethi_op2) && rd1 == G0 && // nop is a special case of sethi
        is_op3(i2, Assembler::add_op3, Assembler::arith_op) &&
        inv_immed(i2) && (unsigned)get_simm13(i2) < (1 << 10) &&
        rd0 == inv_rs1(i2) && rd0 == inv_rd(i2))) {
    fatal("not a set_oop");
  }
}


void NativeMovConstRegPatching::print() {
  tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
}


int NativeMovConstRegPatching::data() const {
#ifdef _LP64
  return data64(addr_at(sethi_offset), long_at(add_offset));
#else
  return data32(long_at(sethi_offset), long_at(add_offset));
#endif
}


void NativeMovConstRegPatching::set_data(int x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi(long_at(sethi_offset), x));
#endif
  set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  if (cb != NULL) {
    nmethod* nm = cb->as_nmethod_or_null();
    assert(nm, "must be");
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = (oop)x;
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
    }
  }
}
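// For illustration, the pattern managed above differs from NativeMovConstReg
// only by the spare word between the sethi and the add:
//    sethi %hi(K), %reg
//    nop                      ! itself a sethi, with %g0 as destination
//    add   %reg, %lo(K), %reg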
// Code for unit testing implementation of NativeMovConstRegPatching class
void NativeMovConstRegPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstRegPatching* nm;
  uint idx;
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->nop();
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->nop();
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstRegPatching_at( cb.code_begin() );
  nm->print();

  nm = nativeMovConstRegPatching_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeMovConstRegPatching class


//-------------------------------------------------------------------


void NativeMovRegMem::copy_instruction_to(address new_instruction_address) {
  Untested("copy_instruction_to");
  int instruction_size = next_instruction_address() - instruction_address();
  for (int i = 0; i < instruction_size; i += BytesPerInstWord) {
    *(int*)(new_instruction_address + i) = *(int*)(address(this) + i);
  }
}


void NativeMovRegMem::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "ld" or "st" of some sort.
  int i0 = long_at(0);
  int op3 = inv_op3(i0);

  assert((int)add_offset == NativeMovConstReg::add_offset, "sethi size ok");

  if (!(is_op(i0, Assembler::ldst_op) &&
        inv_immed(i0) &&
        0 != (op3 < op3_ldst_int_limit
         ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
         : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))))
  {
    int i1 = long_at(ldst_offset);
    Register rd = inv_rd(i0);

    op3 = inv_op3(i1);
    if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
         0 != (op3 < op3_ldst_int_limit
              ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
              : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) {
      fatal("not a ld* or st* op");
    }
  }
}
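// How the op3 test above works: every integer load/store op3 value owns one
// bit in op3_mask_ld/op3_mask_st (and, shifted down by op3_ldst_int_limit,
// every float op3 owns one bit in op3_mask_ldf/op3_mask_stf), so
// (1 << op3) & mask is nonzero exactly when op3 names a ld*/st* opcode.
// For example, lduw has op3 == 0, so it passes iff bit 0 of op3_mask_ld is set.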
void NativeMovRegMem::print() {
  if (is_immediate()) {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
  } else {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
  }
}


// Code for unit testing implementation of NativeMovRegMem class
void NativeMovRegMem::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMem* nm;
  uint idx = 0;
  uint idx1;
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xffffffff, relocInfo::external_word_type);
  AddressLiteral al2(0xaaaabbbb, relocInfo::external_word_type);
  a->ldsw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldx( G5, I3, G4 ); idx++;
  a->ldd( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldd( G5, I3, G4 ); idx++;
  a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  a->stw( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  nm = nativeMovRegMem_at( cb.code_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  while (--idx) {
    nm = nativeMovRegMem_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}

// End code for unit testing implementation of NativeMovRegMem class

//--------------------------------------------------------------------------------


void NativeMovRegMemPatching::copy_instruction_to(address new_instruction_address) {
  Untested("copy_instruction_to");
  int instruction_size = next_instruction_address() - instruction_address();
  for (int i = 0; i < instruction_size; i += wordSize) {
    *(long*)(new_instruction_address + i) = *(long*)(address(this) + i);
  }
}


void NativeMovRegMemPatching::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "ld" or "st" of some sort.
  int i0 = long_at(0);
  int op3 = inv_op3(i0);

  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");

  if (!(is_op(i0, Assembler::ldst_op) &&
        inv_immed(i0) &&
        0 != (op3 < op3_ldst_int_limit
         ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
         : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) {
    int i1 = long_at(ldst_offset);
    Register rd = inv_rd(i0);

    op3 = inv_op3(i1);
    if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
         0 != (op3 < op3_ldst_int_limit
              ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
              : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) {
      fatal("not a ld* or st* op");
    }
  }
}
void NativeMovRegMemPatching::print() {
  if (is_immediate()) {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
  } else {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
  }
}


// Code for unit testing implementation of NativeMovRegMemPatching class
void NativeMovRegMemPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMemPatching* nm;
  uint idx = 0;
  uint idx1;
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al(0xffffffff, relocInfo::external_word_type);
  a->ldsw( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldx( G5, I3, G4 ); idx++;
  a->ldd( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldd( G5, I3, G4 ); idx++;
  a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  a->stw( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  nm = nativeMovRegMemPatching_at( cb.code_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  while (--idx) {
    nm = nativeMovRegMemPatching_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeMovRegMemPatching class


//--------------------------------------------------------------------------------
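// The jump_to pattern verified below is, for a destination K:
//    sethi %hi(K), %treg
//    jmpl  %treg, %lo(K), %lreg
// (under TraceJumps the jmpl may legitimately be an add instead; see verify()).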
void NativeJump::verify() {
  NativeInstruction::verify();
  int i0 = long_at(sethi_offset);
  int i1 = long_at(jmpl_offset);
  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
  // verify the pattern "sethi %hi22(imm), treg ; jmpl treg, %lo10(imm), lreg"
  Register rd = inv_rd(i0);
#ifndef _LP64
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
        (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op) ||
        (TraceJumps && is_op3(i1, Assembler::add_op3, Assembler::arith_op))) &&
        inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
        rd == inv_rs1(i1))) {
    fatal("not a jump_to instruction");
  }
#else
  // In LP64, the jump instruction location varies for non-relocatable
  // jumps; for example, it could be sethi, xor, jmp instead of the
  // 7 instructions for sethi.  So let's check sethi only.
  if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
    fatal("not a jump_to instruction");
  }
#endif
}


void NativeJump::print() {
  tty->print_cr(INTPTR_FORMAT ": jmpl reg, " INTPTR_FORMAT, instruction_address(), jump_destination());
}


// Code for unit testing implementation of NativeJump class
void NativeJump::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeJump* nj;
  uint idx;
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al(0x7fffbbbb, relocInfo::external_word_type);
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), G0, RelocationHolder::none);
  a->delayed()->nop();
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), L3, RelocationHolder::none);
  a->delayed()->nop();

  nj = nativeJump_at( cb.code_begin() );
  nj->print();

  nj = nativeJump_at( nj->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nj->set_jump_destination( nj->instruction_address() + offsets[idx] );
    assert(nj->jump_destination() == (nj->instruction_address() + offsets[idx]), "check unit test");
    nj->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeJump class


void NativeJump::insert(address code_pos, address entry) {
  Unimplemented();
}
// MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
// The problem: jump_to <dest> is a 3-word instruction (including its delay slot).
// Atomic write can be only with 1 word.
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // Here's one way to do it:  Pre-allocate a three-word jump sequence somewhere
  // in the header of the nmethod, within a short branch's span of the patch point.
  // Set up the jump sequence using NativeJump::insert, and then use an annulled
  // unconditional branch at the target site (an atomic 1-word update).
  // Limitations:  You can only patch nmethods, with any given nmethod patched at
  // most once, and the patch must be in the nmethod's header.
  // It's messy, but you can ask the CodeCache for the nmethod containing the
  // target address.

  // %%%%% For now, do something MT-stupid:
  ResourceMark rm;
  int code_size = 1 * BytesPerInstWord;
  CodeBuffer cb(verified_entry, code_size + 1);
  MacroAssembler* a = new MacroAssembler(&cb);
  if (VM_Version::v9_instructions_work()) {
    a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
  } else {
    a->lduw(G0, 0, O7); // "ld" must agree with code in the signal handler
  }
  ICache::invalidate_range(verified_entry, code_size);
}


void NativeIllegalInstruction::insert(address code_pos) {
  NativeIllegalInstruction* nii = (NativeIllegalInstruction*) nativeInstruction_at(code_pos);
  nii->set_long_at(0, illegal_instruction());
}

static int illegal_instruction_bits = 0;

int NativeInstruction::illegal_instruction() {
  if (illegal_instruction_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(ST_RESERVED_FOR_USER_0 + 1);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    illegal_instruction_bits = bits;
    assert(illegal_instruction_bits != 0, "oops");
  }
  return illegal_instruction_bits;
}

static int ic_miss_trap_bits = 0;

bool NativeInstruction::is_ic_miss_trap() {
  if (ic_miss_trap_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    ic_miss_trap_bits = bits;
    assert(ic_miss_trap_bits != 0, "oops");
  }
  return long_at(0) == ic_miss_trap_bits;
}


bool NativeInstruction::is_illegal() {
  if (illegal_instruction_bits == 0) {
    return false;
  }
  return long_at(0) == illegal_instruction_bits;
}


void NativeGeneralJump::verify() {
  assert(((NativeInstruction *)this)->is_jump() ||
         ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
}


void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  Assembler::Condition condition = Assembler::always;
  int x = Assembler::op2(Assembler::br_op2) | Assembler::annul(false) |
    Assembler::cond(condition) | Assembler::wdisp((intptr_t)entry, (intptr_t)code_pos, 22);
  NativeGeneralJump* ni = (NativeGeneralJump*) nativeInstruction_at(code_pos);
  ni->set_long_at(0, x);
}
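// The word assembled above is an ordinary "ba" (branch always, not annulled):
// its 22-bit word displacement gives a reach of +/- 2^21 instruction words,
// about +/-8 MB around code_pos.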
// MT-safe patching of a jmp instruction (and following word).
// First patches the second word, and then atomically replaces
// the first word with the first new instruction word.
// Other processors might briefly see the old first word
// followed by the new second word.  This is OK if the old
// second word is harmless, and the new second word may be
// harmlessly executed in the delay slot of the call.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert (instr_addr != NULL, "illegal address for code patching");
  NativeGeneralJump* h_jump = nativeGeneralJump_at (instr_addr); // checking that it is a jump
  assert(NativeGeneralJump::instruction_size == 8, "wrong instruction size; must be 8");
  int i0 = ((int*)code_buffer)[0];
  int i1 = ((int*)code_buffer)[1];
  int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
         "must not interfere with original call");
  // The set_long_at calls do the ICache::invalidate, so we just need to do them in reverse order
  h_jump->set_long_at(1*BytesPerInstWord, i1);
  h_jump->set_long_at(0*BytesPerInstWord, i0);
  // NOTE:  It is possible that another thread T will execute
  // only the second patched word.
  // In other words, since the original instruction is this
  //    jmp patching_stub; nop                     (NativeGeneralJump)
  // and the new sequence from the buffer is this:
  //    sethi %hi(K), %r; add %r, %lo(K), %r       (NativeMovConstReg)
  // what T will execute is this:
  //    jmp patching_stub; add %r, %lo(K), %r
  // thereby putting garbage into %r before calling the patching stub.
  // This is OK, because the patching stub ignores the value of %r.

  // Make sure the first-patched instruction, which may co-exist
  // briefly with the call, will do something harmless.
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
         "must not interfere with original call");
}