1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "nativeInst_sparc.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "runtime/handles.hpp"
  33 #include "runtime/sharedRuntime.hpp"
  34 #include "runtime/stubRoutines.hpp"
  35 #include "utilities/ostream.hpp"
  36 #ifdef COMPILER1
  37 #include "c1/c1_Runtime1.hpp"
  38 #endif
  39 
void NativeInstruction::set_data64_sethi(address instaddr, intptr_t x) {
  // Re-emit, in place, the patchable sethi sequence that materializes the
  // upper bits of the 64-bit value x.  The destination register is decoded
  // from the existing first instruction so the rewritten sequence targets
  // the same register.
  ResourceMark rm;
  CodeBuffer buf(instaddr, 10 * BytesPerInstWord );
  MacroAssembler* _masm = new MacroAssembler(&buf);
  Register destreg;

  destreg = inv_rd(*(unsigned int *)instaddr);
  // Generate the new sequence
  _masm->patchable_sethi(x, destreg);
  // patchable_sethi emits at most 7 instruction words; flush exactly those.
  ICache::invalidate_range(instaddr, 7 * BytesPerInstWord);
}
  51 
  52 void NativeInstruction::verify_data64_sethi(address instaddr, intptr_t x) {
  53   ResourceMark rm;
  54   unsigned char buffer[10 * BytesPerInstWord];
  55   CodeBuffer buf(buffer, 10 * BytesPerInstWord);
  56   MacroAssembler masm(&buf);
  57 
  58   Register destreg = inv_rd(*(unsigned int *)instaddr);
  59   // Generate the proper sequence into a temporary buffer and compare
  60   // it with the original sequence.
  61   masm.patchable_sethi(x, destreg);
  62   int len = buffer - masm.pc();
  63   for (int i = 0; i < len; i++) {
  64     guarantee(instaddr[i] == buffer[i], "instructions must match");
  65   }
  66 }
  67 
  68 void NativeInstruction::verify() {
  69   // make sure code pattern is actually an instruction address
  70   address addr = addr_at(0);
  71   if (addr == 0 || ((intptr_t)addr & 3) != 0) {
  72     fatal("not an instruction address");
  73   }
  74 }
  75 
  76 void NativeInstruction::print() {
  77   tty->print_cr(INTPTR_FORMAT ": 0x%x", p2i(addr_at(0)), long_at(0));
  78 }
  79 
  80 void NativeInstruction::set_long_at(int offset, int i) {
  81   address addr = addr_at(offset);
  82   *(int*)addr = i;
  83   ICache::invalidate_word(addr);
  84 }
  85 
  86 void NativeInstruction::set_jlong_at(int offset, jlong i) {
  87   address addr = addr_at(offset);
  88   *(jlong*)addr = i;
  89   // Don't need to invalidate 2 words here, because
  90   // the flush instruction operates on doublewords.
  91   ICache::invalidate_word(addr);
  92 }
  93 
void NativeInstruction::set_addr_at(int offset, address x) {
  // Store a pointer-sized value at offset; alignment is asserted so the
  // store (and the doubleword flush below) cover the full value.
  address addr = addr_at(offset);
  assert( ((intptr_t)addr & (wordSize-1)) == 0, "set_addr_at bad address alignment");
  *(uintptr_t*)addr = (uintptr_t)x;
  // Don't need to invalidate 2 words here in the 64-bit case,
  // because the flush instruction operates on doublewords.
  ICache::invalidate_word(addr);
  // The Intel code has this assertion for NativeCall::set_destination,
  // NativeMovConstReg::set_data, NativeMovRegMem::set_offset,
  // NativeJump::set_jump_destination, and NativePushImm32::set_data
  //assert (Patching_lock->owned_by_self(), "must hold lock to patch instruction")
}
 106 
 107 bool NativeInstruction::is_zero_test(Register &reg) {
 108   int x = long_at(0);
 109   Assembler::op3s temp = (Assembler::op3s) (Assembler::sub_op3 | Assembler::cc_bit_op3);
 110   if (is_op3(x, temp, Assembler::arith_op) &&
 111       inv_immed(x) && inv_rd(x) == G0) {
 112       if (inv_rs1(x) == G0) {
 113         reg = inv_rs2(x);
 114         return true;
 115       } else if (inv_rs2(x) == G0) {
 116         reg = inv_rs1(x);
 117         return true;
 118       }
 119   }
 120   return false;
 121 }
 122 
 123 bool NativeInstruction::is_load_store_with_small_offset(Register reg) {
 124   int x = long_at(0);
 125   if (is_op(x, Assembler::ldst_op) &&
 126       inv_rs1(x) == reg && inv_immed(x)) {
 127     return true;
 128   }
 129   return false;
 130 }
 131 
 132 void NativeCall::verify() {
 133   NativeInstruction::verify();
 134   // make sure code pattern is actually a call instruction
 135   int x = long_at(0);
 136   if (!is_op(x, Assembler::call_op)) {
 137     fatal("not a call: 0x%x @ " INTPTR_FORMAT, x, p2i(instruction_address()));
 138   }
 139 }
 140 
 141 void NativeCall::print() {
 142   tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, p2i(instruction_address()), p2i(destination()));
 143 }
 144 
 145 
 146 // MT-safe patching of a call instruction (and following word).
 147 // First patches the second word, and then atomicly replaces
 148 // the first word with the first new instruction word.
 149 // Other processors might briefly see the old first word
 150 // followed by the new second word.  This is OK if the old
 151 // second word is harmless, and the new second word may be
 152 // harmlessly executed in the delay slot of the call.
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  // Patching must be serialized: either hold the lock or be at a safepoint.
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
   assert (instr_addr != NULL, "illegal address for code patching");
   NativeCall* n_call =  nativeCall_at (instr_addr); // checking that it is a call
   assert(NativeCall::instruction_size == 8, "wrong instruction size; must be 8");
   // The replacement is exactly two instruction words from code_buffer.
   int i0 = ((int*)code_buffer)[0];
   int i1 = ((int*)code_buffer)[1];
   // The word in the delay slot must be harmless while patching is in flight.
   int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
   // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   n_call->set_long_at(1*BytesPerInstWord, i1);
   n_call->set_long_at(0*BytesPerInstWord, i0);
   // NOTE:  It is possible that another thread T will execute
   // only the second patched word.
   // In other words, since the original instruction is this
   //    call patching_stub; nop                   (NativeCall)
   // and the new sequence from the buffer is this:
   //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
   // what T will execute is this:
   //    call patching_stub; add %r, %lo(K), %r
   // thereby putting garbage into %r before calling the patching stub.
   // This is OK, because the patching stub ignores the value of %r.

   // Make sure the first-patched instruction, which may co-exist
   // briefly with the call, will do something harmless.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
}
 185 
 186 // Similar to replace_mt_safe, but just changes the destination.  The
 187 // important thing is that free-running threads are able to execute this
 188 // call instruction at all times.  Thus, the displacement field must be
 189 // instruction-word-aligned.  This is always true on SPARC.
 190 //
 191 // Used in the runtime linkage of calls; see class CompiledIC.
 192 void NativeCall::set_destination_mt_safe(address dest) {
 193   assert((Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
 194          CompiledICLocker::is_safe(addr_at(0)),
 195          "concurrent code patching");
 196   // set_destination uses set_long_at which does the ICache::invalidate
 197   set_destination(dest);
 198 }
 199 
 200 // Code for unit testing implementation of NativeCall class
void NativeCall::test() {
#ifdef ASSERT
  // Debug-only unit test: assemble a call, then round-trip
  // set_destination()/destination() over a range of displacements.
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeCall  *nc;
  uint idx;
  // Displacements exercising zero, negative, and boundary 32-bit values.
  int offsets[] = {
    0x0,
    (int)0xfffffff0,
    (int)0x7ffffff0,
    (int)0x80000000,
    0x20,
    0x4000,
  };

  // Lift feature restrictions so any encoding may be emitted.
  VM_Version::allow_all();

  a->call( a->pc(), relocInfo::none );
  a->delayed()->nop();
  nc = nativeCall_at( cb.insts_begin() );
  nc->print();

  nc = nativeCall_overwriting_at( nc->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nc->set_destination( cb.insts_begin() + offsets[idx] );
    assert(nc->destination() == (cb.insts_begin() + offsets[idx]), "check unit test");
    nc->print();
  }

  nc = nativeCall_before( cb.insts_begin() + 8 );
  nc->print();

  VM_Version::revert();
#endif
}
 237 // End code for unit testing implementation of NativeCall class
 238 
 239 //-------------------------------------------------------------------
 240 
void NativeFarCall::set_destination(address dest) {
  // Address materialized in the instruction stream, so nothing to do.
  return;
#if 0 // What we'd do if we really did want to change the destination
  if (destination() == dest) {
    return;
  }
  ResourceMark rm;
  CodeBuffer buf(addr_at(0), instruction_size + 1);
  MacroAssembler* _masm = new MacroAssembler(&buf);
  // Generate the new sequence
  AddressLiteral(dest);
  _masm->jumpl_to(dest, O7, O7);
  ICache::invalidate_range(addr_at(0), instruction_size );
#endif
}
 257 
 258 void NativeFarCall::verify() {
 259   // make sure code pattern is actually a jumpl_to instruction
 260   assert((int)instruction_size == (int)NativeJump::instruction_size, "same as jump_to");
 261   assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
 262   nativeJump_at(addr_at(0))->verify();
 263 }
 264 
 265 bool NativeFarCall::is_call_at(address instr) {
 266   return nativeInstruction_at(instr)->is_sethi();
 267 }
 268 
 269 void NativeFarCall::print() {
 270   tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, p2i(instruction_address()), p2i(destination()));
 271 }
 272 
 273 bool NativeFarCall::destination_is_compiled_verified_entry_point() {
 274   nmethod* callee = CodeCache::find_nmethod(destination());
 275   if (callee == NULL) {
 276     return false;
 277   } else {
 278     return destination() == callee->verified_entry_point();
 279   }
 280 }
 281 
 282 // MT-safe patching of a far call.
// MT-safe patching of a far call.
void NativeFarCall::replace_mt_safe(address instr_addr, address code_buffer) {
  // No implementation exists; reaching this reports a VM error.
  Unimplemented();
}
 286 
 287 // Code for unit testing implementation of NativeFarCall class
// Code for unit testing implementation of NativeFarCall class
void NativeFarCall::test() {
  // No unit test exists; reaching this reports a VM error.
  Unimplemented();
}
 291 // End code for unit testing implementation of NativeFarCall class
 292 
 293 //-------------------------------------------------------------------
 294 
 295 
 296 void NativeMovConstReg::verify() {
 297   NativeInstruction::verify();
 298   // make sure code pattern is actually a "set_metadata" synthetic instruction
 299   // see MacroAssembler::set_oop()
 300   int i0 = long_at(sethi_offset);
 301   int i1 = long_at(add_offset);
 302 
 303   // verify the pattern "sethi %hi22(imm), reg ;  add reg, %lo10(imm), reg"
 304   Register rd = inv_rd(i0);
 305   if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
 306     fatal("not a set_metadata");
 307   }
 308 }
 309 
 310 
 311 void NativeMovConstReg::print() {
 312   tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, p2i(instruction_address()), data());
 313 }
 314 
 315 
 316 intptr_t NativeMovConstReg::data() const {
 317   return data64(addr_at(sethi_offset), long_at(add_offset));
 318 }
 319 
 320 
void NativeMovConstReg::set_data(intptr_t x) {
  // Patch both halves of the set sequence: the sethi-materialized upper
  // bits and the add's simm13 low bits.  Each setter flushes the I-cache.
  set_data64_sethi(addr_at(sethi_offset), x);
  set_long_at(add_offset,   set_data32_simm13( long_at(add_offset),   x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    // Walk relocations covering just this instruction; at most one oop cell
    // and one metadata cell are expected, which the asserts enforce.
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    Metadata** metadata_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = cast_to_oop(x);
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation *r = iter.metadata_reloc();
        if (metadata_addr == NULL) {
          metadata_addr = r->metadata_addr();
          *metadata_addr = (Metadata*)x;
        } else {
          assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
        }
      }
    }
  }
}
 354 
 355 
 356 // Code for unit testing implementation of NativeMovConstReg class
void NativeMovConstReg::test() {
#ifdef ASSERT
  // Debug-only unit test: assemble two sethi/add pairs, then round-trip
  // set_data()/data() over representative constants.
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstReg* nm;
  uint idx;
  // Values exercising zero, sign boundaries, and simm13 edge cases.
  int offsets[] = {
    0x0,
    (int)0x7fffffff,
    (int)0x80000000,
    (int)0xffffffff,
    0x20,
    4096,
    4097,
  };

  // Lift feature restrictions so any encoding may be emitted.
  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstReg_at( cb.insts_begin() );
  nm->print();

  nm = nativeMovConstReg_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif
}
 396 // End code for unit testing implementation of NativeMovConstReg class
 397 
 398 //-------------------------------------------------------------------
 399 
 400 void NativeMovConstReg32::verify() {
 401   NativeInstruction::verify();
 402   // make sure code pattern is actually a "set_metadata" synthetic instruction
 403   // see MacroAssembler::set_oop()
 404   int i0 = long_at(sethi_offset);
 405   int i1 = long_at(add_offset);
 406 
 407   // verify the pattern "sethi %hi22(imm), reg ;  add reg, %lo10(imm), reg"
 408   Register rd = inv_rd(i0);
 409   if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
 410     fatal("not a set_metadata");
 411   }
 412 }
 413 
 414 
 415 void NativeMovConstReg32::print() {
 416   tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, p2i(instruction_address()), data());
 417 }
 418 
 419 
 420 intptr_t NativeMovConstReg32::data() const {
 421   return data32(long_at(sethi_offset), long_at(add_offset));
 422 }
 423 
 424 
void NativeMovConstReg32::set_data(intptr_t x) {
  // Patch the sethi's hi22 field and the add's simm13 field in place;
  // each set_long_at flushes the I-cache.
  set_long_at(sethi_offset, set_data32_sethi(  long_at(sethi_offset), x));
  set_long_at(add_offset,   set_data32_simm13( long_at(add_offset),   x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    // Walk relocations covering just this instruction; at most one oop cell
    // and one metadata cell are expected, which the asserts enforce.
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    Metadata** metadata_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = cast_to_oop(x);
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation *r = iter.metadata_reloc();
        if (metadata_addr == NULL) {
          metadata_addr = r->metadata_addr();
          *metadata_addr = (Metadata*)x;
        } else {
          assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
        }
      }
    }
  }
}
 458 
 459 //-------------------------------------------------------------------
 460 
void NativeMovConstRegPatching::verify() {
  NativeInstruction::verify();
  // Make sure code pattern is sethi/nop/add.
  int i0 = long_at(sethi_offset);
  int i1 = long_at(nop_offset);
  int i2 = long_at(add_offset);
  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");

  // Verify the pattern "sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg"
  // The casual reader should note that on Sparc a nop is a special case of sethi
  // in which the destination register is %g0.
  Register rd0 = inv_rd(i0);
  Register rd1 = inv_rd(i1);
  // All five conditions must hold: real sethi, nop in the middle, an
  // immediate add whose simm13 fits in 10 bits, and matching registers.
  if (!(is_op2(i0, Assembler::sethi_op2) && rd0 != G0 &&
        is_op2(i1, Assembler::sethi_op2) && rd1 == G0 &&        // nop is a special case of sethi
        is_op3(i2, Assembler::add_op3, Assembler::arith_op) &&
        inv_immed(i2) && (unsigned)get_simm13(i2) < (1 << 10) &&
        rd0 == inv_rs1(i2) && rd0 == inv_rd(i2))) {
    fatal("not a set_metadata");
  }
}
 482 
 483 
 484 void NativeMovConstRegPatching::print() {
 485   tty->print_cr(INTPTR_FORMAT ": mov reg, 0x%x", p2i(instruction_address()), data());
 486 }
 487 
 488 
 489 int NativeMovConstRegPatching::data() const {
 490   return data64(addr_at(sethi_offset), long_at(add_offset));
 491 }
 492 
 493 
void NativeMovConstRegPatching::set_data(int x) {
  // Patch the sethi sequence's upper bits and the add's simm13 low bits;
  // both setters flush the I-cache.
  set_data64_sethi(addr_at(sethi_offset), x);
  set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    // Walk relocations covering just this instruction; at most one oop cell
    // and one metadata cell are expected, which the asserts enforce.
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    Metadata** metadata_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = cast_to_oop(x);
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation *r = iter.metadata_reloc();
        if (metadata_addr == NULL) {
          metadata_addr = r->metadata_addr();
          *metadata_addr = (Metadata*)x;
        } else {
          assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
        }
      }
    }
  }
}
 527 
 528 
 529 // Code for unit testing implementation of NativeMovConstRegPatching class
void NativeMovConstRegPatching::test() {
#ifdef ASSERT
  // Debug-only unit test: assemble two sethi/nop/add triples, then
  // round-trip set_data()/data() over representative constants.
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstRegPatching* nm;
  uint idx;
  // Values exercising zero, sign boundaries, and simm13 edge cases.
  int offsets[] = {
    0x0,
    (int)0x7fffffff,
    (int)0x80000000,
    (int)0xffffffff,
    0x20,
    4096,
    4097,
  };

  // Lift feature restrictions so any encoding may be emitted.
  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->nop();
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->nop();
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstRegPatching_at( cb.insts_begin() );
  nm->print();

  nm = nativeMovConstRegPatching_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif // ASSERT
}
 571 // End code for unit testing implementation of NativeMovConstRegPatching class
 572 
 573 
 574 //-------------------------------------------------------------------
 575 
 576 
void NativeMovRegMem::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "ld" or "st" of some sort.
  int i0 = long_at(0);
  int op3 = inv_op3(i0);

  assert((int)add_offset == NativeMovConstReg::add_offset, "sethi size ok");

  // First try: the instruction itself is an immediate-form ld/st whose op3
  // falls in the integer or float load/store masks.
  if (!(is_op(i0, Assembler::ldst_op) &&
        inv_immed(i0) &&
        0 != (op3 < op3_ldst_int_limit
         ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
         : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))))
  {
    // Second try: a register-form ld/st at ldst_offset using i0's rd as index.
    int i1 = long_at(ldst_offset);
    Register rd = inv_rd(i0);

    op3 = inv_op3(i1);
    // NOTE(review): this reads as "!is_op(...) && ..." where the comment
    // above suggests "is_op(...) && ..." was intended; the negation makes
    // the fallback check lenient -- confirm before changing, as tightening
    // it could fire the fatal on previously accepted patterns.
    if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
         0 != (op3 < op3_ldst_int_limit
              ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
               : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) {
      fatal("not a ld* or st* op");
    }
  }
}
 603 
 604 
 605 void NativeMovRegMem::print() {
 606   if (is_immediate()) {
 607     // offset is a signed 13-bit immediate, so casting it to int will not lose significant bits
 608     tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %d]", p2i(instruction_address()), (int)offset());
 609   } else {
 610     tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", p2i(instruction_address()));
 611   }
 612 }
 613 
 614 
 615 // Code for unit testing implementation of NativeMovRegMem class
void NativeMovRegMem::test() {
#ifdef ASSERT
  // Debug-only unit test: assemble every ld*/st* flavor in both immediate
  // and register addressing forms, counting each with idx, then walk the
  // buffer round-tripping set_offset()/offset() on every instruction.
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMem* nm;
  uint idx = 0;
  uint idx1;
  // Offsets exercising zero, sign boundaries, and simm13 edge cases.
  int offsets[] = {
    0x0,
    (int)0xffffffff,
    (int)0x7fffffff,
    (int)0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  // Lift feature restrictions so any encoding may be emitted.
  VM_Version::allow_all();

  AddressLiteral al1(0xffffffff, relocInfo::external_word_type);
  AddressLiteral al2(0xaaaabbbb, relocInfo::external_word_type);
  // Loads: each immediate-form ld is followed by a sethi/add pair that
  // builds the register operand for the register-form variant.
  a->ldsw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldx( G5, I3, G4 ); idx++;
  a->ldd( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldd( G5, I3, G4 ); idx++;
  a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  // Stores: same immediate-then-register pattern for each st* flavor.
  a->stw( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  nm = nativeMovRegMem_at( cb.insts_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  // Visit the remaining idx-1 instructions, checking the offset round-trip
  // against every value in offsets[] (clamped to low10 for immediate form).
  while (--idx) {
    nm = nativeMovRegMem_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
 709 
 710 // End code for unit testing implementation of NativeMovRegMem class
 711 
 712 
 713 //--------------------------------------------------------------------------------
 714 
 715 
 716 void NativeJump::verify() {
 717   NativeInstruction::verify();
 718   int i0 = long_at(sethi_offset);
 719   int i1 = long_at(jmpl_offset);
 720   assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
 721   // verify the pattern "sethi %hi22(imm), treg ;  jmpl treg, %lo10(imm), lreg"
 722   Register rd = inv_rd(i0);
 723   // In LP64, the jump instruction location varies for non relocatable
 724   // jumps, for example is could be sethi, xor, jmp instead of the
 725   // 7 instructions for sethi.  So let's check sethi only.
 726   if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
 727     fatal("not a jump_to instruction");
 728   }
 729 }
 730 
 731 
 732 void NativeJump::print() {
 733   tty->print_cr(INTPTR_FORMAT ": jmpl reg, " INTPTR_FORMAT, p2i(instruction_address()), p2i(jump_destination()));
 734 }
 735 
 736 
 737 // Code for unit testing implementation of NativeJump class
void NativeJump::test() {
#ifdef ASSERT
  // Debug-only unit test: assemble two sethi/jmpl pairs, then round-trip
  // set_jump_destination()/jump_destination() over a range of displacements.
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeJump* nj;
  uint idx;
  // Displacements exercising zero, sign boundaries, and simm13 edge cases.
  int offsets[] = {
    0x0,
    (int)0xffffffff,
    (int)0x7fffffff,
    (int)0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  // Lift feature restrictions so any encoding may be emitted.
  VM_Version::allow_all();

  AddressLiteral al(0x7fffbbbb, relocInfo::external_word_type);
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), G0, RelocationHolder::none);
  a->delayed()->nop();
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), L3, RelocationHolder::none);
  a->delayed()->nop();

  nj = nativeJump_at( cb.insts_begin() );
  nj->print();

  nj = nativeJump_at( nj->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nj->set_jump_destination( nj->instruction_address() + offsets[idx] );
    assert(nj->jump_destination() == (nj->instruction_address() + offsets[idx]), "check unit test");
    nj->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
 779 // End code for unit testing implementation of NativeJump class
 780 
 781 
void NativeJump::insert(address code_pos, address entry) {
  // No implementation exists; reaching this reports a VM error.
  Unimplemented();
}
 785 
 786 // MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
 787 // The problem: jump_to <dest> is a 3-word instruction (including its delay slot).
 788 // Atomic write can be only with 1 word.
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // Here's one way to do it:  Pre-allocate a three-word jump sequence somewhere
  // in the header of the nmethod, within a short branch's span of the patch point.
  // Set up the jump sequence using NativeJump::insert, and then use an annulled
  // unconditional branch at the target site (an atomic 1-word update).
  // Limitations:  You can only patch nmethods, with any given nmethod patched at
  // most once, and the patch must be in the nmethod's header.
  // It's messy, but you can ask the CodeCache for the nmethod containing the
  // target address.

  // %%%%% For now, do something MT-stupid:
  // Overwrite a single word at verified_entry with a faulting load; the
  // signal handler recognizes this exact encoding, so it must not change
  // independently of that code.
  ResourceMark rm;
  int code_size = 1 * BytesPerInstWord;
  CodeBuffer cb(verified_entry, code_size + 1);
  MacroAssembler* a = new MacroAssembler(&cb);
  a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
  ICache::invalidate_range(verified_entry, code_size);
}
 807 
 808 
 809 void NativeIllegalInstruction::insert(address code_pos) {
 810   NativeIllegalInstruction* nii = (NativeIllegalInstruction*) nativeInstruction_at(code_pos);
 811   nii->set_long_at(0, illegal_instruction());
 812 }
 813 
// Cached encoding of the illegal-instruction trap; 0 until first computed.
static int illegal_instruction_bits = 0;
 815 
int NativeInstruction::illegal_instruction() {
  // Lazily assemble a trap instruction once and cache its encoding in
  // illegal_instruction_bits.
  // NOTE(review): the lazy init is unsynchronized; presumably benign since
  // every thread computes identical bits -- confirm no stricter guarantee
  // is required.
  if (illegal_instruction_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(ST_RESERVED_FOR_USER_0 + 1);
    // Read back the single emitted word and sanity-check its opcode fields.
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    illegal_instruction_bits = bits;
    assert(illegal_instruction_bits != 0, "oops");
  }
  return illegal_instruction_bits;
}
 831 
// Cached encoding of the inline-cache-miss trap; 0 until first computed.
static int ic_miss_trap_bits = 0;
 833 
bool NativeInstruction::is_ic_miss_trap() {
  // Lazily assemble the IC-miss trap once and cache its encoding, then
  // compare this instruction's word against it.
  // NOTE(review): like illegal_instruction(), the lazy init is
  // unsynchronized; presumably benign since all threads compute the same
  // bits -- confirm.
  if (ic_miss_trap_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
    // Read back the single emitted word and sanity-check its opcode fields.
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    ic_miss_trap_bits = bits;
    assert(ic_miss_trap_bits != 0, "oops");
  }
  return long_at(0) == ic_miss_trap_bits;
}
 849 
 850 
 851 bool NativeInstruction::is_illegal() {
 852   if (illegal_instruction_bits == 0) {
 853     return false;
 854   }
 855   return long_at(0) == illegal_instruction_bits;
 856 }
 857 
 858 
 859 void NativeGeneralJump::verify() {
 860   assert(((NativeInstruction *)this)->is_jump() ||
 861          ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
 862 }
 863 
 864 
void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  // Hand-assemble a branch-always (not annulled) with a 22-bit word
  // displacement from code_pos to entry, then store the single word
  // (set_long_at also flushes the I-cache).
  Assembler::Condition condition = Assembler::always;
  int x = Assembler::op2(Assembler::br_op2) | Assembler::annul(false) |
    Assembler::cond(condition) | Assembler::wdisp((intptr_t)entry, (intptr_t)code_pos, 22);
  NativeGeneralJump* ni = (NativeGeneralJump*) nativeInstruction_at(code_pos);
  ni->set_long_at(0, x);
}
 872 
 873 
 874 // MT-safe patching of a jmp instruction (and following word).
 875 // First patches the second word, and then atomicly replaces
 876 // the first word with the first new instruction word.
 877 // Other processors might briefly see the old first word
 878 // followed by the new second word.  This is OK if the old
 879 // second word is harmless, and the new second word may be
 880 // harmlessly executed in the delay slot of the call.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
   // Patching must be serialized: either hold the lock or be at a safepoint.
   assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
   assert (instr_addr != NULL, "illegal address for code patching");
   NativeGeneralJump* h_jump =  nativeGeneralJump_at (instr_addr); // checking that it is a call
   assert(NativeGeneralJump::instruction_size == 8, "wrong instruction size; must be 8");
   // The replacement is exactly two instruction words from code_buffer.
   int i0 = ((int*)code_buffer)[0];
   int i1 = ((int*)code_buffer)[1];
   // The word in the delay slot must be harmless while patching is in flight.
   int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
   // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   h_jump->set_long_at(1*BytesPerInstWord, i1);
   h_jump->set_long_at(0*BytesPerInstWord, i0);
   // NOTE:  It is possible that another thread T will execute
   // only the second patched word.
   // In other words, since the original instruction is this
   //    jmp patching_stub; nop                    (NativeGeneralJump)
   // and the new sequence from the buffer is this:
   //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
   // what T will execute is this:
   //    jmp patching_stub; add %r, %lo(K), %r
   // thereby putting garbage into %r before calling the patching stub.
   // This is OK, because the patching stub ignores the value of %r.

   // Make sure the first-patched instruction, which may co-exist
   // briefly with the call, will do something harmless.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
}