/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif


bool NativeInstruction::is_dtrace_trap() {
  return !is_nop();
}

void NativeInstruction::set_data64_sethi(address instaddr, intptr_t x) {
  ResourceMark rm;
  CodeBuffer buf(instaddr, 10 * BytesPerInstWord );
  MacroAssembler* _masm = new MacroAssembler(&buf);
  Register destreg;

  destreg = inv_rd(*(unsigned int *)instaddr);
  // Generate the new sequence
  _masm->patchable_sethi(x, destreg);
  ICache::invalidate_range(instaddr, 7 * BytesPerInstWord);
}

void NativeInstruction::verify_data64_sethi(address instaddr, intptr_t x) {
  ResourceMark rm;
  unsigned char buffer[10 * BytesPerInstWord];
  CodeBuffer buf(buffer, 10 * BytesPerInstWord);
  MacroAssembler masm(&buf);

  Register destreg = inv_rd(*(unsigned int *)instaddr);
  // Generate the proper sequence into a temporary buffer and compare
  // it with the original sequence.
  masm.patchable_sethi(x, destreg);
  int len = masm.pc() - buffer;   // number of bytes emitted into the temporary buffer
  for (int i = 0; i < len; i++) {
    assert(instaddr[i] == buffer[i], "instructions must match");
  }
}

void NativeInstruction::verify() {
  // make sure code pattern is actually an instruction address
  address addr = addr_at(0);
  if (addr == 0 || ((intptr_t)addr & 3) != 0) {
    fatal("not an instruction address");
  }
}

void NativeInstruction::print() {
  tty->print_cr(INTPTR_FORMAT ": 0x%x", addr_at(0), long_at(0));
}

void NativeInstruction::set_long_at(int offset, int i) {
  address addr = addr_at(offset);
  *(int*)addr = i;
  ICache::invalidate_word(addr);
}

void NativeInstruction::set_jlong_at(int offset, jlong i) {
  address addr = addr_at(offset);
  *(jlong*)addr = i;
  // Don't need to invalidate 2 words here, because
  // the flush instruction operates on doublewords.
  ICache::invalidate_word(addr);
}

void NativeInstruction::set_addr_at(int offset, address x) {
  address addr = addr_at(offset);
  assert( ((intptr_t)addr & (wordSize-1)) == 0, "set_addr_at bad address alignment");
  *(uintptr_t*)addr = (uintptr_t)x;
  // Don't need to invalidate 2 words here in the 64-bit case,
  // because the flush instruction operates on doublewords.
  ICache::invalidate_word(addr);
  // The Intel code has this assertion for NativeCall::set_destination,
  // NativeMovConstReg::set_data, NativeMovRegMem::set_offset,
  // NativeJump::set_jump_destination, and NativePushImm32::set_data
  //assert (Patching_lock->owned_by_self(), "must hold lock to patch instruction")
}

bool NativeInstruction::is_zero_test(Register &reg) {
  int x = long_at(0);
  Assembler::op3s temp = (Assembler::op3s) (Assembler::sub_op3 | Assembler::cc_bit_op3);
  if (is_op3(x, temp, Assembler::arith_op) &&
      inv_immed(x) && inv_rd(x) == G0) {
    if (inv_rs1(x) == G0) {
      reg = inv_rs2(x);
      return true;
    } else if (inv_rs2(x) == G0) {
      reg = inv_rs1(x);
      return true;
    }
  }
  return false;
}

bool NativeInstruction::is_load_store_with_small_offset(Register reg) {
  int x = long_at(0);
  if (is_op(x, Assembler::ldst_op) &&
      inv_rs1(x) == reg && inv_immed(x)) {
    return true;
  }
  return false;
}

void NativeCall::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a call instruction
  if (!is_op(long_at(0), Assembler::call_op)) {
    fatal("not a call");
  }
}

void NativeCall::print() {
  tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
}


// MT-safe patching of a call instruction (and following word).
// First patches the second word, and then atomically replaces
// the first word with the first new instruction word.
// Other processors might briefly see the old first word
// followed by the new second word.  This is OK if the old
// second word is harmless, and the new second word may be
// harmlessly executed in the delay slot of the call.
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert (instr_addr != NULL, "illegal address for code patching");
  NativeCall* n_call = nativeCall_at (instr_addr); // checking that it is a call
  assert(NativeCall::instruction_size == 8, "wrong instruction size; must be 8");
  int i0 = ((int*)code_buffer)[0];
  int i1 = ((int*)code_buffer)[1];
  int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction(),
         "must not interfere with original call");
  // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
  n_call->set_long_at(1*BytesPerInstWord, i1);
  n_call->set_long_at(0*BytesPerInstWord, i0);
  // NOTE:  It is possible that another thread T will execute
  // only the second patched word.
  // In other words, since the original instruction is this
  //    call patching_stub; nop                   (NativeCall)
  // and the new sequence from the buffer is this:
  //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
  // what T will execute is this:
  //    call patching_stub; add %r, %lo(K), %r
  // thereby putting garbage into %r before calling the patching stub.
  // This is OK, because the patching stub ignores the value of %r.

  // Make sure the first-patched instruction, which may co-exist
  // briefly with the call, will do something harmless.
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction(),
         "must not interfere with original call");
}

// Similar to replace_mt_safe, but just changes the destination.  The
// important thing is that free-running threads are able to execute this
// call instruction at all times.  Thus, the displacement field must be
// instruction-word-aligned.  This is always true on SPARC.
//
// Used in the runtime linkage of calls; see class CompiledIC.
void NativeCall::set_destination_mt_safe(address dest) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  // set_destination uses set_long_at which does the ICache::invalidate
  set_destination(dest);
}

// Code for unit testing implementation of NativeCall class
void NativeCall::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeCall  *nc;
  uint idx;
  int offsets[] = {
    0x0,
    0xfffffff0,
    0x7ffffff0,
    0x80000000,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  a->call( a->pc(), relocInfo::none );
  a->delayed()->nop();
  nc = nativeCall_at( cb.insts_begin() );
  nc->print();

  nc = nativeCall_overwriting_at( nc->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nc->set_destination( cb.insts_begin() + offsets[idx] );
    assert(nc->destination() == (cb.insts_begin() + offsets[idx]), "check unit test");
    nc->print();
  }

  nc = nativeCall_before( cb.insts_begin() + 8 );
  nc->print();

  VM_Version::revert();
#endif
}
// End code for unit testing implementation of NativeCall class

//-------------------------------------------------------------------

#ifdef _LP64

void NativeFarCall::set_destination(address dest) {
  // Address materialized in the instruction stream, so nothing to do.
  return;
#if 0 // What we'd do if we really did want to change the destination
  if (destination() == dest) {
    return;
  }
  ResourceMark rm;
  CodeBuffer buf(addr_at(0), instruction_size + 1);
  MacroAssembler* _masm = new MacroAssembler(&buf);
  // Generate the new sequence
  AddressLiteral(dest);
  _masm->jumpl_to(dest, O7, O7);
  ICache::invalidate_range(addr_at(0), instruction_size );
#endif
}

void NativeFarCall::verify() {
  // make sure code pattern is actually a jumpl_to instruction
  assert((int)instruction_size == (int)NativeJump::instruction_size, "same as jump_to");
  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
  nativeJump_at(addr_at(0))->verify();
}

bool NativeFarCall::is_call_at(address instr) {
  return nativeInstruction_at(instr)->is_sethi();
}

void NativeFarCall::print() {
  tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
}

bool NativeFarCall::destination_is_compiled_verified_entry_point() {
  nmethod* callee = CodeCache::find_nmethod(destination());
  if (callee == NULL) {
    return false;
  } else {
    return destination() == callee->verified_entry_point();
  }
}

// MT-safe patching of a far call.
void NativeFarCall::replace_mt_safe(address instr_addr, address code_buffer) {
  Unimplemented();
}

// Code for unit testing implementation of NativeFarCall class
void NativeFarCall::test() {
  Unimplemented();
}
// End code for unit testing implementation of NativeFarCall class

#endif // _LP64

//-------------------------------------------------------------------


void NativeMovConstReg::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "set_metadata" synthetic instruction
  // see MacroAssembler::set_oop()
  int i0 = long_at(sethi_offset);
  int i1 = long_at(add_offset);

  // verify the pattern "sethi %hi22(imm), reg ;  add reg, %lo10(imm), reg"
  Register rd = inv_rd(i0);
#ifndef _LP64
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
        is_op3(i1, Assembler::add_op3, Assembler::arith_op) &&
        inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
        rd == inv_rs1(i1) && rd == inv_rd(i1))) {
    fatal("not a set_metadata");
  }
#else
  if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
    fatal("not a set_metadata");
  }
#endif
}


void NativeMovConstReg::print() {
  tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
}


#ifdef _LP64
intptr_t NativeMovConstReg::data() const {
  return data64(addr_at(sethi_offset), long_at(add_offset));
}
#else
intptr_t NativeMovConstReg::data() const {
  return data32(long_at(sethi_offset), long_at(add_offset));
}
#endif


void NativeMovConstReg::set_data(intptr_t x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi( long_at(sethi_offset), x));
#endif
  set_long_at(add_offset, set_data32_simm13( long_at(add_offset), x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    Metadata** metadata_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = cast_to_oop(x);
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation *r = iter.metadata_reloc();
        if (metadata_addr == NULL) {
          metadata_addr = r->metadata_addr();
          *metadata_addr = (Metadata*)x;
        } else {
          assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
        }
      }
    }
  }
}


// Code for unit testing implementation of NativeMovConstReg class
void NativeMovConstReg::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstReg* nm;
  uint idx;
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstReg_at( cb.insts_begin() );
  nm->print();

  nm = nativeMovConstReg_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif
}
// End code for unit testing implementation of NativeMovConstReg class

//-------------------------------------------------------------------

void NativeMovConstRegPatching::verify() {
  NativeInstruction::verify();
  // Make sure code pattern is sethi/nop/add.
  int i0 = long_at(sethi_offset);
  int i1 = long_at(nop_offset);
  int i2 = long_at(add_offset);
  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");

  // Verify the pattern "sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg"
  // The casual reader should note that on Sparc a nop is a special case of sethi
  // in which the destination register is %g0.
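  // (The canonical SPARC nop encoding, 0x01000000, is exactly "sethi 0, %g0".)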
  Register rd0 = inv_rd(i0);
  Register rd1 = inv_rd(i1);
  if (!(is_op2(i0, Assembler::sethi_op2) && rd0 != G0 &&
        is_op2(i1, Assembler::sethi_op2) && rd1 == G0 &&  // nop is a special case of sethi
        is_op3(i2, Assembler::add_op3, Assembler::arith_op) &&
        inv_immed(i2) && (unsigned)get_simm13(i2) < (1 << 10) &&
        rd0 == inv_rs1(i2) && rd0 == inv_rd(i2))) {
    fatal("not a set_metadata");
  }
}


void NativeMovConstRegPatching::print() {
  tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
}


int NativeMovConstRegPatching::data() const {
#ifdef _LP64
  return data64(addr_at(sethi_offset), long_at(add_offset));
#else
  return data32(long_at(sethi_offset), long_at(add_offset));
#endif
}


void NativeMovConstRegPatching::set_data(int x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi(long_at(sethi_offset), x));
#endif
  set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    Metadata** metadata_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = cast_to_oop(x);
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation *r = iter.metadata_reloc();
        if (metadata_addr == NULL) {
          metadata_addr = r->metadata_addr();
          *metadata_addr = (Metadata*)x;
        } else {
          assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
        }
      }
    }
  }
}


// Code for unit testing implementation of NativeMovConstRegPatching class
void NativeMovConstRegPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstRegPatching* nm;
  uint idx;
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->nop();
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->nop();
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstRegPatching_at( cb.insts_begin() );
  nm->print();

  nm = nativeMovConstRegPatching_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeMovConstRegPatching class


//-------------------------------------------------------------------


void NativeMovRegMem::copy_instruction_to(address new_instruction_address) {
  Untested("copy_instruction_to");
  int instruction_size = next_instruction_address() - instruction_address();
  for (int i = 0; i < instruction_size; i += BytesPerInstWord) {
    *(int*)(new_instruction_address + i) = *(int*)(address(this) + i);
  }
}


void NativeMovRegMem::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "ld" or "st" of some sort.
  int i0 = long_at(0);
  int op3 = inv_op3(i0);

  assert((int)add_offset == NativeMovConstReg::add_offset, "sethi size ok");

  if (!(is_op(i0, Assembler::ldst_op) &&
        inv_immed(i0) &&
        0 != (op3 < op3_ldst_int_limit
              ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
              : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))))
  {
    int i1 = long_at(ldst_offset);
    Register rd = inv_rd(i0);

    op3 = inv_op3(i1);
    if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
        0 != (op3 < op3_ldst_int_limit
              ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
              : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) {
      fatal("not a ld* or st* op");
    }
  }
}


void NativeMovRegMem::print() {
  if (is_immediate()) {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
  } else {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
  }
}


// Code for unit testing implementation of NativeMovRegMem class
void NativeMovRegMem::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMem* nm;
  uint idx = 0;
  uint idx1;
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xffffffff, relocInfo::external_word_type);
  AddressLiteral al2(0xaaaabbbb, relocInfo::external_word_type);
  a->ldsw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldx( G5, I3, G4 ); idx++;
  a->ldd( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldd( G5, I3, G4 ); idx++;
  a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  a->stw( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  nm = nativeMovRegMem_at( cb.insts_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  while (--idx) {
    nm = nativeMovRegMem_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}

// End code for unit testing implementation of NativeMovRegMem class

//--------------------------------------------------------------------------------


void NativeMovRegMemPatching::copy_instruction_to(address new_instruction_address) {
  Untested("copy_instruction_to");
  int instruction_size = next_instruction_address() - instruction_address();
  for (int i = 0; i < instruction_size; i += wordSize) {
    *(long*)(new_instruction_address + i) = *(long*)(address(this) + i);
  }
}


void NativeMovRegMemPatching::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "ld" or "st" of some sort.
  int i0 = long_at(0);
  int op3 = inv_op3(i0);

  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");

  if (!(is_op(i0, Assembler::ldst_op) &&
        inv_immed(i0) &&
        0 != (op3 < op3_ldst_int_limit
              ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
              : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) {
    int i1 = long_at(ldst_offset);
    Register rd = inv_rd(i0);

    op3 = inv_op3(i1);
    if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
        0 != (op3 < op3_ldst_int_limit
              ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
              : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) {
      fatal("not a ld* or st* op");
    }
  }
}


void NativeMovRegMemPatching::print() {
  if (is_immediate()) {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
  } else {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
  }
}


// Code for unit testing implementation of NativeMovRegMemPatching class
void NativeMovRegMemPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMemPatching* nm;
  uint idx = 0;
  uint idx1;
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al(0xffffffff, relocInfo::external_word_type);
  a->ldsw( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldx( G5, I3, G4 ); idx++;
  a->ldd( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldd( G5, I3, G4 ); idx++;
  a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  a->stw( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  nm = nativeMovRegMemPatching_at( cb.insts_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  while (--idx) {
    nm = nativeMovRegMemPatching_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeMovRegMemPatching class


//--------------------------------------------------------------------------------


void NativeJump::verify() {
  NativeInstruction::verify();
  int i0 = long_at(sethi_offset);
  int i1 = long_at(jmpl_offset);
  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
  // verify the pattern "sethi %hi22(imm), treg ;  jmpl treg, %lo10(imm), lreg"
  Register rd = inv_rd(i0);
#ifndef _LP64
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
        (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op) ||
         (TraceJumps && is_op3(i1, Assembler::add_op3, Assembler::arith_op))) &&
        inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
        rd == inv_rs1(i1))) {
    fatal("not a jump_to instruction");
  }
#else
  // In LP64, the jump instruction location varies for non-relocatable
  // jumps; for example, it could be sethi, xor, jmp instead of the
  // 7 instructions for sethi.  So let's check sethi only.
  if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
    fatal("not a jump_to instruction");
  }
#endif
}


void NativeJump::print() {
  tty->print_cr(INTPTR_FORMAT ": jmpl reg, " INTPTR_FORMAT, instruction_address(), jump_destination());
}


// Code for unit testing implementation of NativeJump class
void NativeJump::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeJump* nj;
  uint idx;
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al(0x7fffbbbb, relocInfo::external_word_type);
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), G0, RelocationHolder::none);
  a->delayed()->nop();
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), L3, RelocationHolder::none);
  a->delayed()->nop();

  nj = nativeJump_at( cb.insts_begin() );
  nj->print();

  nj = nativeJump_at( nj->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nj->set_jump_destination( nj->instruction_address() + offsets[idx] );
    assert(nj->jump_destination() == (nj->instruction_address() + offsets[idx]), "check unit test");
    nj->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeJump class


void NativeJump::insert(address code_pos, address entry) {
  Unimplemented();
}

// MT-safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
// The problem: jump_to <dest> is a 3-word instruction (including its delay slot).
// Atomic write can be only with 1 word.
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // Here's one way to do it:  Pre-allocate a three-word jump sequence somewhere
  // in the header of the nmethod, within a short branch's span of the patch point.
  // Set up the jump sequence using NativeJump::insert, and then use an annulled
  // unconditional branch at the target site (an atomic 1-word update).
  // Limitations:  You can only patch nmethods, with any given nmethod patched at
  // most once, and the patch must be in the nmethod's header.
  // It's messy, but you can ask the CodeCache for the nmethod containing the
  // target address.

  // %%%%% For now, do something MT-stupid:
  ResourceMark rm;
  int code_size = 1 * BytesPerInstWord;
  CodeBuffer cb(verified_entry, code_size + 1);
  MacroAssembler* a = new MacroAssembler(&cb);
  a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
  ICache::invalidate_range(verified_entry, code_size);
}


void NativeIllegalInstruction::insert(address code_pos) {
  NativeIllegalInstruction* nii = (NativeIllegalInstruction*) nativeInstruction_at(code_pos);
  nii->set_long_at(0, illegal_instruction());
}

static int illegal_instruction_bits = 0;

int NativeInstruction::illegal_instruction() {
  if (illegal_instruction_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(ST_RESERVED_FOR_USER_0 + 1);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    illegal_instruction_bits = bits;
    assert(illegal_instruction_bits != 0, "oops");
  }
  return illegal_instruction_bits;
}

static int ic_miss_trap_bits = 0;

bool NativeInstruction::is_ic_miss_trap() {
  if (ic_miss_trap_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    ic_miss_trap_bits = bits;
    assert(ic_miss_trap_bits != 0, "oops");
  }
  return long_at(0) == ic_miss_trap_bits;
}


bool NativeInstruction::is_illegal() {
  if (illegal_instruction_bits == 0) {
    return false;
  }
  return long_at(0) == illegal_instruction_bits;
}


void NativeGeneralJump::verify() {
  assert(((NativeInstruction *)this)->is_jump() ||
         ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
}


void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  Assembler::Condition condition = Assembler::always;
  int x = Assembler::op2(Assembler::br_op2) | Assembler::annul(false) |
    Assembler::cond(condition) | Assembler::wdisp((intptr_t)entry, (intptr_t)code_pos, 22);
  NativeGeneralJump* ni = (NativeGeneralJump*) nativeInstruction_at(code_pos);
  ni->set_long_at(0, x);
}


// MT-safe patching of a jmp instruction (and following word).
// First patches the second word, and then atomically replaces
// the first word with the first new instruction word.
// Other processors might briefly see the old first word
// followed by the new second word.  This is OK if the old
// second word is harmless, and the new second word may be
// harmlessly executed in the delay slot of the call.
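// (This mirrors NativeCall::replace_mt_safe above, with the patched call replaced by a jump.)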
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert (instr_addr != NULL, "illegal address for code patching");
  NativeGeneralJump* h_jump = nativeGeneralJump_at (instr_addr); // checking that it is a jump
  assert(NativeGeneralJump::instruction_size == 8, "wrong instruction size; must be 8");
  int i0 = ((int*)code_buffer)[0];
  int i1 = ((int*)code_buffer)[1];
  int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction(),
         "must not interfere with original call");
  // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
  h_jump->set_long_at(1*BytesPerInstWord, i1);
  h_jump->set_long_at(0*BytesPerInstWord, i0);
  // NOTE:  It is possible that another thread T will execute
  // only the second patched word.
  // In other words, since the original instruction is this
  //    jmp patching_stub; nop                    (NativeGeneralJump)
  // and the new sequence from the buffer is this:
  //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
  // what T will execute is this:
  //    jmp patching_stub; add %r, %lo(K), %r
  // thereby putting garbage into %r before calling the patching stub.
  // This is OK, because the patching stub ignores the value of %r.

  // Make sure the first-patched instruction, which may co-exist
  // briefly with the call, will do something harmless.
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction(),
         "must not interfere with original call");
}