1 /*
   2  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "nativeInst_sparc.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/handles.hpp"
  32 #include "runtime/sharedRuntime.hpp"
  33 #include "runtime/stubRoutines.hpp"
  34 #include "utilities/ostream.hpp"
  35 #ifdef COMPILER1
  36 #include "c1/c1_Runtime1.hpp"
  37 #endif
  38 
  39 
  40 bool NativeInstruction::is_dtrace_trap() {
  41   return !is_nop();
  42 }
  43 
// Re-emit the 64-bit sethi-based constant-materialization sequence at
// instaddr so that it produces the new value x.  The destination register
// is recovered from the existing first instruction, so only the immediate
// changes; the patched range is then flushed from the instruction cache.
void NativeInstruction::set_data64_sethi(address instaddr, intptr_t x) {
  ResourceMark rm;
  CodeBuffer buf(instaddr, 10 * BytesPerInstWord );
  MacroAssembler* _masm = new MacroAssembler(&buf);
  Register destreg;

  // Reuse the destination register of the instruction being patched.
  destreg = inv_rd(*(unsigned int *)instaddr);
  // Generate the new sequence
  _masm->patchable_sethi(x, destreg);
  // Flush the (up to) 7 instruction words the sequence may occupy.
  ICache::invalidate_range(instaddr, 7 * BytesPerInstWord);
}
  55 
  56 void NativeInstruction::verify_data64_sethi(address instaddr, intptr_t x) {
  57   ResourceMark rm;
  58   unsigned char buffer[10 * BytesPerInstWord];
  59   CodeBuffer buf(buffer, 10 * BytesPerInstWord);
  60   MacroAssembler masm(&buf);
  61 
  62   Register destreg = inv_rd(*(unsigned int *)instaddr);
  63   // Generate the proper sequence into a temporary buffer and compare
  64   // it with the original sequence.
  65   masm.patchable_sethi(x, destreg);
  66   int len = buffer - masm.pc();
  67   for (int i = 0; i < len; i++) {
  68     assert(instaddr[i] == buffer[i], "instructions must match");
  69   }
  70 }
  71 
// Sanity-check that this object sits on a plausible instruction address:
// non-null and 4-byte aligned (all SPARC instructions are one word).
void NativeInstruction::verify() {
  // make sure code pattern is actually an instruction address
  address addr = addr_at(0);
  if (addr == 0 || ((intptr_t)addr & 3) != 0) {
    fatal("not an instruction address");
  }
}
  79 
// Print the instruction's address and its raw 32-bit encoding.
void NativeInstruction::print() {
  tty->print_cr(INTPTR_FORMAT ": 0x%x", p2i(addr_at(0)), long_at(0));
}
  83 
// Store one 32-bit instruction word at the given byte offset and flush
// the affected word from the instruction cache.
void NativeInstruction::set_long_at(int offset, int i) {
  address addr = addr_at(offset);
  *(int*)addr = i;
  ICache::invalidate_word(addr);
}
  89 
// Store a 64-bit value (two instruction words) at the given byte offset
// and flush it from the instruction cache.
void NativeInstruction::set_jlong_at(int offset, jlong i) {
  address addr = addr_at(offset);
  *(jlong*)addr = i;
  // Don't need to invalidate 2 words here, because
  // the flush instruction operates on doublewords.
  ICache::invalidate_word(addr);
}
  97 
// Store a full machine address at the given byte offset (must be
// word-aligned) and flush it from the instruction cache.
void NativeInstruction::set_addr_at(int offset, address x) {
  address addr = addr_at(offset);
  assert( ((intptr_t)addr & (wordSize-1)) == 0, "set_addr_at bad address alignment");
  *(uintptr_t*)addr = (uintptr_t)x;
  // Don't need to invalidate 2 words here in the 64-bit case,
  // because the flush instruction operates on doublewords.
  ICache::invalidate_word(addr);
  // The Intel code has this assertion for NativeCall::set_destination,
  // NativeMovConstReg::set_data, NativeMovRegMem::set_offset,
  // NativeJump::set_jump_destination, and NativePushImm32::set_data
  //assert (Patching_lock->owned_by_self(), "must hold lock to patch instruction")
}
 110 
 111 bool NativeInstruction::is_zero_test(Register &reg) {
 112   int x = long_at(0);
 113   Assembler::op3s temp = (Assembler::op3s) (Assembler::sub_op3 | Assembler::cc_bit_op3);
 114   if (is_op3(x, temp, Assembler::arith_op) &&
 115       inv_immed(x) && inv_rd(x) == G0) {
 116       if (inv_rs1(x) == G0) {
 117         reg = inv_rs2(x);
 118         return true;
 119       } else if (inv_rs2(x) == G0) {
 120         reg = inv_rs1(x);
 121         return true;
 122       }
 123   }
 124   return false;
 125 }
 126 
 127 bool NativeInstruction::is_load_store_with_small_offset(Register reg) {
 128   int x = long_at(0);
 129   if (is_op(x, Assembler::ldst_op) &&
 130       inv_rs1(x) == reg && inv_immed(x)) {
 131     return true;
 132   }
 133   return false;
 134 }
 135 
// Check that this site holds a pc-relative call instruction.
void NativeCall::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a call instruction
  if (!is_op(long_at(0), Assembler::call_op)) {
    fatal("not a call");
  }
}
 143 
// Print the call site address and its resolved destination.
void NativeCall::print() {
  tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, p2i(instruction_address()), p2i(destination()));
}
 147 
 148 
 149 // MT-safe patching of a call instruction (and following word).
 150 // First patches the second word, and then atomicly replaces
 151 // the first word with the first new instruction word.
 152 // Other processors might briefly see the old first word
 153 // followed by the new second word.  This is OK if the old
 154 // second word is harmless, and the new second word may be
 155 // harmlessly executed in the delay slot of the call.
// Replace the two-word call+delay-slot sequence at instr_addr with the
// two words from code_buffer, in an order that is safe for concurrently
// executing threads.  The second word is patched first (so a thread that
// has already executed the old first word sees a harmless delay slot),
// then the first word is swapped atomically.  Caller must hold the
// Patching_lock or be at a safepoint.
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
   assert (instr_addr != NULL, "illegal address for code patching");
   NativeCall* n_call =  nativeCall_at (instr_addr); // checking that it is a call
   assert(NativeCall::instruction_size == 8, "wrong instruction size; must be 8");
   int i0 = ((int*)code_buffer)[0];
   int i1 = ((int*)code_buffer)[1];
   int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
   // The word in the delay slot must be harmless both before and after
   // patching, since another thread may execute it alongside either
   // version of the first word.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
   // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   n_call->set_long_at(1*BytesPerInstWord, i1);
   n_call->set_long_at(0*BytesPerInstWord, i0);
   // NOTE:  It is possible that another thread T will execute
   // only the second patched word.
   // In other words, since the original instruction is this
   //    call patching_stub; nop                   (NativeCall)
   // and the new sequence from the buffer is this:
   //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
   // what T will execute is this:
   //    call patching_stub; add %r, %lo(K), %r
   // thereby putting garbage into %r before calling the patching stub.
   // This is OK, because the patching stub ignores the value of %r.

   // Make sure the first-patched instruction, which may co-exist
   // briefly with the call, will do something harmless.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
}
 188 
 189 // Similar to replace_mt_safe, but just changes the destination.  The
 190 // important thing is that free-running threads are able to execute this
 191 // call instruction at all times.  Thus, the displacement field must be
 192 // instruction-word-aligned.  This is always true on SPARC.
 193 //
 194 // Used in the runtime linkage of calls; see class CompiledIC.
// Retarget the call to 'dest'.  A single instruction word is rewritten,
// which is atomic on SPARC, so free-running threads always see a valid
// call.  Caller must hold the Patching_lock or be at a safepoint.
void NativeCall::set_destination_mt_safe(address dest) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  // set_destination uses set_long_at which does the ICache::invalidate
  set_destination(dest);
}
 201 
 202 // Code for unit testing implementation of NativeCall class
// Code for unit testing implementation of NativeCall class
// (debug builds only): emit a call, then drive set_destination/
// destination round-trips over a range of displacements.
void NativeCall::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeCall  *nc;
  uint idx;
  // Displacements exercised, including extreme positive/negative values.
  int offsets[] = {
    0x0,
    0xfffffff0,
    0x7ffffff0,
    0x80000000,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  a->call( a->pc(), relocInfo::none );
  a->delayed()->nop();
  nc = nativeCall_at( cb.insts_begin() );
  nc->print();

  nc = nativeCall_overwriting_at( nc->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nc->set_destination( cb.insts_begin() + offsets[idx] );
    assert(nc->destination() == (cb.insts_begin() + offsets[idx]), "check unit test");
    nc->print();
  }

  nc = nativeCall_before( cb.insts_begin() + 8 );
  nc->print();

  VM_Version::revert();
#endif
}
 239 // End code for unit testing implementation of NativeCall class
 240 
 241 //-------------------------------------------------------------------
 242 
 243 #ifdef _LP64
 244 
// Intentionally a no-op: the destination is materialized in the
// instruction stream, so nothing needs patching here.  The disabled
// code below shows what a real retarget would look like.
void NativeFarCall::set_destination(address dest) {
  // Address materialized in the instruction stream, so nothing to do.
  return;
#if 0 // What we'd do if we really did want to change the destination
  if (destination() == dest) {
    return;
  }
  ResourceMark rm;
  CodeBuffer buf(addr_at(0), instruction_size + 1);
  MacroAssembler* _masm = new MacroAssembler(&buf);
  // Generate the new sequence
  AddressLiteral(dest);
  _masm->jumpl_to(dest, O7, O7);
  ICache::invalidate_range(addr_at(0), instruction_size );
#endif
}
 261 
// A far call has the same layout as a jump_to sequence; delegate the
// pattern check to NativeJump after sanity-checking the size/offset
// assumptions that make the delegation valid.
void NativeFarCall::verify() {
  // make sure code pattern is actually a jumpl_to instruction
  assert((int)instruction_size == (int)NativeJump::instruction_size, "same as jump_to");
  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
  nativeJump_at(addr_at(0))->verify();
}
 268 
// A far-call sequence is identified by its leading sethi instruction.
bool NativeFarCall::is_call_at(address instr) {
  return nativeInstruction_at(instr)->is_sethi();
}
 272 
// Print the far-call site address and its resolved destination.
void NativeFarCall::print() {
  tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, p2i(instruction_address()), p2i(destination()));
}
 276 
 277 bool NativeFarCall::destination_is_compiled_verified_entry_point() {
 278   nmethod* callee = CodeCache::find_nmethod(destination());
 279   if (callee == NULL) {
 280     return false;
 281   } else {
 282     return destination() == callee->verified_entry_point();
 283   }
 284 }
 285 
 286 // MT-safe patching of a far call.
// MT-safe patching of a far call.
// Not needed on this platform; deliberately left unimplemented.
void NativeFarCall::replace_mt_safe(address instr_addr, address code_buffer) {
  Unimplemented();
}
 290 
 291 // Code for unit testing implementation of NativeFarCall class
// Code for unit testing implementation of NativeFarCall class.
// No test exists for this class; deliberately left unimplemented.
void NativeFarCall::test() {
  Unimplemented();
}
 295 // End code for unit testing implementation of NativeFarCall class
 296 
 297 #endif // _LP64
 298 
 299 //-------------------------------------------------------------------
 300 
 301 
 302 void NativeMovConstReg::verify() {
 303   NativeInstruction::verify();
 304   // make sure code pattern is actually a "set_metadata" synthetic instruction
 305   // see MacroAssembler::set_oop()
 306   int i0 = long_at(sethi_offset);
 307   int i1 = long_at(add_offset);
 308 
 309   // verify the pattern "sethi %hi22(imm), reg ;  add reg, %lo10(imm), reg"
 310   Register rd = inv_rd(i0);
 311 #ifndef _LP64
 312   if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
 313         is_op3(i1, Assembler::add_op3, Assembler::arith_op) &&
 314         inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
 315         rd == inv_rs1(i1) && rd == inv_rd(i1))) {
 316     fatal("not a set_metadata");
 317   }
 318 #else
 319   if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
 320     fatal("not a set_metadata");
 321   }
 322 #endif
 323 }
 324 
 325 
// Print the site address and the constant currently materialized.
void NativeMovConstReg::print() {
  tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, p2i(instruction_address()), data());
}
 329 
 330 
#ifdef _LP64
// Extract the 64-bit constant from the sethi-based sequence plus the
// final add's simm13 field.
intptr_t NativeMovConstReg::data() const {
  return data64(addr_at(sethi_offset), long_at(add_offset));
}
#else
// Extract the 32-bit constant from the sethi hi22 and the add's simm13.
intptr_t NativeMovConstReg::data() const {
  return data32(long_at(sethi_offset), long_at(add_offset));
}
#endif
 340 
 341 
// Patch the materialized constant to x: rewrite the sethi (full 64-bit
// sequence on LP64) and the low-bits add, then mirror the value into
// the enclosing nmethod's oop/metadata relocation cell, if any, so that
// the GC and class unloading see the same value the code uses.
void NativeMovConstReg::set_data(intptr_t x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi(  long_at(sethi_offset), x));
#endif
  set_long_at(add_offset,   set_data32_simm13( long_at(add_offset),   x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    Metadata** metadata_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = cast_to_oop(x);
        } else {
          // All oop relocations over this instruction must share one cell.
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation *r = iter.metadata_reloc();
        if (metadata_addr == NULL) {
          metadata_addr = r->metadata_addr();
          *metadata_addr = (Metadata*)x;
        } else {
          assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
        }
      }
    }
  }
}
 379 
 380 
 381 // Code for unit testing implementation of NativeMovConstReg class
// Code for unit testing implementation of NativeMovConstReg class
// (debug builds only): emit two sethi/add pairs, then round-trip
// set_data/data over a range of constants.
void NativeMovConstReg::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstReg* nm;
  uint idx;
  // Constants exercised, including sign-boundary and simm13-boundary values.
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstReg_at( cb.insts_begin() );
  nm->print();

  nm = nativeMovConstReg_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif
}
 421 // End code for unit testing implementation of NativeMovConstReg class
 422 
 423 //-------------------------------------------------------------------
 424 
// Check the patchable-constant pattern sethi/nop/add.  The nop reserves
// a slot so the sequence can later be patched to a longer form.
void NativeMovConstRegPatching::verify() {
  NativeInstruction::verify();
  // Make sure code pattern is sethi/nop/add.
  int i0 = long_at(sethi_offset);
  int i1 = long_at(nop_offset);
  int i2 = long_at(add_offset);
  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");

  // Verify the pattern "sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg"
  // The casual reader should note that on Sparc a nop is a special case if sethi
  // in which the destination register is %g0.
  Register rd0 = inv_rd(i0);
  Register rd1 = inv_rd(i1);
  if (!(is_op2(i0, Assembler::sethi_op2) && rd0 != G0 &&
        is_op2(i1, Assembler::sethi_op2) && rd1 == G0 &&        // nop is a special case of sethi
        is_op3(i2, Assembler::add_op3, Assembler::arith_op) &&
        inv_immed(i2) && (unsigned)get_simm13(i2) < (1 << 10) &&
        rd0 == inv_rs1(i2) && rd0 == inv_rd(i2))) {
    fatal("not a set_metadata");
  }
}
 446 
 447 
// Print the site address and the constant currently materialized.
void NativeMovConstRegPatching::print() {
  tty->print_cr(INTPTR_FORMAT ": mov reg, 0x%x", p2i(instruction_address()), data());
}
 451 
 452 
// Extract the materialized constant from the sethi sequence and the
// final add's simm13 field (64- or 32-bit decode as appropriate).
int NativeMovConstRegPatching::data() const {
#ifdef _LP64
  return data64(addr_at(sethi_offset), long_at(add_offset));
#else
  return data32(long_at(sethi_offset), long_at(add_offset));
#endif
}
 460 
 461 
// Patch the materialized constant to x and mirror the value into the
// enclosing nmethod's oop/metadata relocation cell, if any.  Same
// structure as NativeMovConstReg::set_data, but for the patchable
// (sethi/nop/add) sequence.
void NativeMovConstRegPatching::set_data(int x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi(long_at(sethi_offset), x));
#endif
  set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    Metadata** metadata_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = cast_to_oop(x);
        } else {
          // All oop relocations over this instruction must share one cell.
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation *r = iter.metadata_reloc();
        if (metadata_addr == NULL) {
          metadata_addr = r->metadata_addr();
          *metadata_addr = (Metadata*)x;
        } else {
          assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
        }
      }
    }
  }
}
 499 
 500 
 501 // Code for unit testing implementation of NativeMovConstRegPatching class
// Code for unit testing implementation of NativeMovConstRegPatching class
// (debug builds only): emit two sethi/nop/add triples, then round-trip
// set_data/data over a range of constants.
void NativeMovConstRegPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstRegPatching* nm;
  uint idx;
  // Constants exercised, including sign-boundary and simm13-boundary values.
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->nop();
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->nop();
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstRegPatching_at( cb.insts_begin() );
  nm->print();

  nm = nativeMovConstRegPatching_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif // ASSERT
}
 543 // End code for unit testing implementation of NativeMovConstRegPatching class
 544 
 545 
 546 //-------------------------------------------------------------------
 547 
 548 
// Copy this instruction sequence to a new location, one instruction
// word at a time (keeps each store word-atomic; do not replace with a
// byte-wise memcpy).
void NativeMovRegMem::copy_instruction_to(address new_instruction_address) {
  Untested("copy_instruction_to");
  int instruction_size = next_instruction_address() - instruction_address();
  for (int i = 0; i < instruction_size; i += BytesPerInstWord) {
    *(int*)(new_instruction_address + i) = *(int*)(address(this) + i);
  }
}
 556 
 557 
 558 void NativeMovRegMem::verify() {
 559   NativeInstruction::verify();
 560   // make sure code pattern is actually a "ld" or "st" of some sort.
 561   int i0 = long_at(0);
 562   int op3 = inv_op3(i0);
 563 
 564   assert((int)add_offset == NativeMovConstReg::add_offset, "sethi size ok");
 565 
 566   if (!(is_op(i0, Assembler::ldst_op) &&
 567         inv_immed(i0) &&
 568         0 != (op3 < op3_ldst_int_limit
 569          ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
 570          : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))))
 571   {
 572     int i1 = long_at(ldst_offset);
 573     Register rd = inv_rd(i0);
 574 
 575     op3 = inv_op3(i1);
 576     if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
 577          0 != (op3 < op3_ldst_int_limit
 578               ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
 579                : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) {
 580       fatal("not a ld* or st* op");
 581     }
 582   }
 583 }
 584 
 585 
// Print the access in either its immediate-offset or register-offset form.
void NativeMovRegMem::print() {
  if (is_immediate()) {
    // offset is a signed 13-bit immediate, so casting it to int will not lose significant bits
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %d]", p2i(instruction_address()), (int)offset());
  } else {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", p2i(instruction_address()));
  }
}
 594 
 595 
 596 // Code for unit testing implementation of NativeMovRegMem class
// Code for unit testing implementation of NativeMovRegMem class
// (debug builds only): emit every load/store flavor in both immediate
// and register-offset form, counting them in idx, then round-trip
// set_offset/offset on each one.
void NativeMovRegMem::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMem* nm;
  uint idx = 0;    // number of instructions emitted below
  uint idx1;
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xffffffff, relocInfo::external_word_type);
  AddressLiteral al2(0xaaaabbbb, relocInfo::external_word_type);
  // Loads: immediate form, then sethi/add + register form, per flavor.
  a->ldsw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldx( G5, I3, G4 ); idx++;
  a->ldd( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldd( G5, I3, G4 ); idx++;
  a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  // Stores: same pattern as the loads above.
  a->stw( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  nm = nativeMovRegMem_at( cb.insts_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  // Walk the remaining idx-1 instructions, exercising each offset value.
  while (--idx) {
    nm = nativeMovRegMem_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
 690 
 691 // End code for unit testing implementation of NativeMovRegMem class
 692 
 693 
 694 //--------------------------------------------------------------------------------
 695 
 696 
 697 void NativeJump::verify() {
 698   NativeInstruction::verify();
 699   int i0 = long_at(sethi_offset);
 700   int i1 = long_at(jmpl_offset);
 701   assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
 702   // verify the pattern "sethi %hi22(imm), treg ;  jmpl treg, %lo10(imm), lreg"
 703   Register rd = inv_rd(i0);
 704 #ifndef _LP64
 705   if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
 706         (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op) ||
 707         (TraceJumps && is_op3(i1, Assembler::add_op3, Assembler::arith_op))) &&
 708         inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
 709         rd == inv_rs1(i1))) {
 710     fatal("not a jump_to instruction");
 711   }
 712 #else
 713   // In LP64, the jump instruction location varies for non relocatable
 714   // jumps, for example is could be sethi, xor, jmp instead of the
 715   // 7 instructions for sethi.  So let's check sethi only.
 716   if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
 717     fatal("not a jump_to instruction");
 718   }
 719 #endif
 720 }
 721 
 722 
// Print the jump site address and its resolved destination.
void NativeJump::print() {
  tty->print_cr(INTPTR_FORMAT ": jmpl reg, " INTPTR_FORMAT, p2i(instruction_address()), p2i(jump_destination()));
}
 726 
 727 
 728 // Code for unit testing implementation of NativeJump class
// Code for unit testing implementation of NativeJump class
// (debug builds only): emit two sethi/jmpl pairs, then round-trip
// set_jump_destination/jump_destination over a range of displacements.
void NativeJump::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeJump* nj;
  uint idx;
  // Displacements exercised, including extreme and simm13-boundary values.
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al(0x7fffbbbb, relocInfo::external_word_type);
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), G0, RelocationHolder::none);
  a->delayed()->nop();
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), L3, RelocationHolder::none);
  a->delayed()->nop();

  nj = nativeJump_at( cb.insts_begin() );
  nj->print();

  nj = nativeJump_at( nj->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nj->set_jump_destination( nj->instruction_address() + offsets[idx] );
    assert(nj->jump_destination() == (nj->instruction_address() + offsets[idx]), "check unit test");
    nj->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
 770 // End code for unit testing implementation of NativeJump class
 771 
 772 
// Insert a jump at code_pos targeting entry.
// Not needed on this platform; deliberately left unimplemented.
void NativeJump::insert(address code_pos, address entry) {
  Unimplemented();
}
 776 
 777 // MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
 778 // The problem: jump_to <dest> is a 3-word instruction (including its delay slot).
 779 // Atomic write can be only with 1 word.
// Make a zombie nmethod's verified entry point trap: overwrite its first
// instruction with a load from address zero, which faults and is
// recognized by the signal handler.  This is a single-word (atomic)
// patch, sidestepping the multi-word jump problem described above.
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // Here's one way to do it:  Pre-allocate a three-word jump sequence somewhere
  // in the header of the nmethod, within a short branch's span of the patch point.
  // Set up the jump sequence using NativeJump::insert, and then use an annulled
  // unconditional branch at the target site (an atomic 1-word update).
  // Limitations:  You can only patch nmethods, with any given nmethod patched at
  // most once, and the patch must be in the nmethod's header.
  // It's messy, but you can ask the CodeCache for the nmethod containing the
  // target address.

  // %%%%% For now, do something MT-stupid:
  ResourceMark rm;
  int code_size = 1 * BytesPerInstWord;
  CodeBuffer cb(verified_entry, code_size + 1);
  MacroAssembler* a = new MacroAssembler(&cb);
  a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
  ICache::invalidate_range(verified_entry, code_size);
}
 798 
 799 
// Overwrite the instruction at code_pos with the canonical illegal
// instruction (a trap), flushing the icache via set_long_at.
void NativeIllegalInstruction::insert(address code_pos) {
  NativeIllegalInstruction* nii = (NativeIllegalInstruction*) nativeInstruction_at(code_pos);
  nii->set_long_at(0, illegal_instruction());
}
 804 
 805 static int illegal_instruction_bits = 0;
 806 
// Return the encoding of the trap used as the "illegal instruction"
// marker.  Computed lazily by assembling the trap into a scratch buffer
// and cached in illegal_instruction_bits.
// NOTE(review): the lazy init is not synchronized — presumably safe
// because recomputation is idempotent; confirm against callers.
int NativeInstruction::illegal_instruction() {
  if (illegal_instruction_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(ST_RESERVED_FOR_USER_0 + 1);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    illegal_instruction_bits = bits;
    assert(illegal_instruction_bits != 0, "oops");
  }
  return illegal_instruction_bits;
}
 822 
 823 static int ic_miss_trap_bits = 0;
 824 
// True iff this instruction is the inline-cache-miss trap
// ("tne %icc-style trap" with the reserved software trap number).
// The expected encoding is assembled once into a scratch buffer and
// cached in ic_miss_trap_bits (same lazy-init pattern as
// illegal_instruction above).
bool NativeInstruction::is_ic_miss_trap() {
  if (ic_miss_trap_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    ic_miss_trap_bits = bits;
    assert(ic_miss_trap_bits != 0, "oops");
  }
  return long_at(0) == ic_miss_trap_bits;
}
 840 
 841 
 842 bool NativeInstruction::is_illegal() {
 843   if (illegal_instruction_bits == 0) {
 844     return false;
 845   }
 846   return long_at(0) == illegal_instruction_bits;
 847 }
 848 
 849 
// A general jump may be either an unconditional or a conditional branch.
void NativeGeneralJump::verify() {
  assert(((NativeInstruction *)this)->is_jump() ||
         ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
}
 854 
 855 
// Write a single-word unconditional branch (ba, not annulled) at
// code_pos targeting entry, encoding the 22-bit pc-relative displacement.
void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  Assembler::Condition condition = Assembler::always;
  int x = Assembler::op2(Assembler::br_op2) | Assembler::annul(false) |
    Assembler::cond(condition) | Assembler::wdisp((intptr_t)entry, (intptr_t)code_pos, 22);
  NativeGeneralJump* ni = (NativeGeneralJump*) nativeInstruction_at(code_pos);
  ni->set_long_at(0, x);
}
 863 
 864 
 865 // MT-safe patching of a jmp instruction (and following word).
 866 // First patches the second word, and then atomicly replaces
 867 // the first word with the first new instruction word.
 868 // Other processors might briefly see the old first word
 869 // followed by the new second word.  This is OK if the old
 870 // second word is harmless, and the new second word may be
 871 // harmlessly executed in the delay slot of the call.
// Replace the two-word jump+delay-slot sequence at instr_addr with the
// two words from code_buffer, second word first so that concurrent
// threads only ever see harmless combinations (mirrors
// NativeCall::replace_mt_safe).  Caller must hold the Patching_lock or
// be at a safepoint.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
   assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
   assert (instr_addr != NULL, "illegal address for code patching");
   NativeGeneralJump* h_jump =  nativeGeneralJump_at (instr_addr); // checking that it is a call
   assert(NativeGeneralJump::instruction_size == 8, "wrong instruction size; must be 8");
   int i0 = ((int*)code_buffer)[0];
   int i1 = ((int*)code_buffer)[1];
   int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
   // The delay-slot word must be harmless both before and after patching.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
   // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   h_jump->set_long_at(1*BytesPerInstWord, i1);
   h_jump->set_long_at(0*BytesPerInstWord, i0);
   // NOTE:  It is possible that another thread T will execute
   // only the second patched word.
   // In other words, since the original instruction is this
   //    jmp patching_stub; nop                    (NativeGeneralJump)
   // and the new sequence from the buffer is this:
   //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
   // what T will execute is this:
   //    jmp patching_stub; add %r, %lo(K), %r
   // thereby putting garbage into %r before calling the patching stub.
   // This is OK, because the patching stub ignores the value of %r.

   // Make sure the first-patched instruction, which may co-exist
   // briefly with the call, will do something harmless.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
}