1 /*
   2  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "nativeInst_sparc.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/handles.hpp"
  32 #include "runtime/sharedRuntime.hpp"
  33 #include "runtime/stubRoutines.hpp"
  34 #include "utilities/ostream.hpp"
  35 #ifdef COMPILER1
  36 #include "c1/c1_Runtime1.hpp"
  37 #endif
  38 
  39 
  40 bool NativeInstruction::is_dtrace_trap() {
  41   return !is_nop();
  42 }
  43 
// Re-emit the multi-instruction 64-bit sethi sequence at instaddr so that it
// materializes the new constant x, preserving the original destination register.
void NativeInstruction::set_data64_sethi(address instaddr, intptr_t x) {
  ResourceMark rm;
  CodeBuffer buf(instaddr, 10 * BytesPerInstWord );
  MacroAssembler* _masm = new MacroAssembler(&buf);
  Register destreg;

  // Recover the destination register from the first (sethi) instruction.
  destreg = inv_rd(*(unsigned int *)instaddr);
  // Generate the new sequence
  _masm->patchable_sethi(x, destreg);
  // NOTE(review): the CodeBuffer is sized for 10 instruction words but only
  // 7 are flushed here — presumably patchable_sethi emits at most 7; confirm.
  ICache::invalidate_range(instaddr, 7 * BytesPerInstWord);
}
  55 
  56 void NativeInstruction::verify_data64_sethi(address instaddr, intptr_t x) {
  57   ResourceMark rm;
  58   unsigned char buffer[10 * BytesPerInstWord];
  59   CodeBuffer buf(buffer, 10 * BytesPerInstWord);
  60   MacroAssembler masm(&buf);
  61 
  62   Register destreg = inv_rd(*(unsigned int *)instaddr);
  63   // Generate the proper sequence into a temporary buffer and compare
  64   // it with the original sequence.
  65   masm.patchable_sethi(x, destreg);
  66   int len = buffer - masm.pc();
  67   for (int i = 0; i < len; i++) {
  68     assert(instaddr[i] == buffer[i], "instructions must match");
  69   }
  70 }
  71 
  72 void NativeInstruction::verify() {
  73   // make sure code pattern is actually an instruction address
  74   address addr = addr_at(0);
  75   if (addr == 0 || ((intptr_t)addr & 3) != 0) {
  76     fatal("not an instruction address");
  77   }
  78 }
  79 
// Print the instruction's address and raw 32-bit encoding.
void NativeInstruction::print() {
  tty->print_cr(INTPTR_FORMAT ": 0x%x", addr_at(0), long_at(0));
}
  83 
// Store one 32-bit instruction word at the given byte offset and flush the
// instruction cache for that word so other CPUs see the new instruction.
void NativeInstruction::set_long_at(int offset, int i) {
  address addr = addr_at(offset);
  *(int*)addr = i;
  ICache::invalidate_word(addr);
}
  89 
// Store a 64-bit value (two instruction words) at the given byte offset
// and flush the instruction cache.
void NativeInstruction::set_jlong_at(int offset, jlong i) {
  address addr = addr_at(offset);
  *(jlong*)addr = i;
  // Don't need to invalidate 2 words here, because
  // the flush instruction operates on doublewords.
  ICache::invalidate_word(addr);
}
  97 
// Store a full machine word (an address) at the given byte offset and flush
// the instruction cache. The target must be word-aligned.
void NativeInstruction::set_addr_at(int offset, address x) {
  address addr = addr_at(offset);
  assert( ((intptr_t)addr & (wordSize-1)) == 0, "set_addr_at bad address alignment");
  *(uintptr_t*)addr = (uintptr_t)x;
  // Don't need to invalidate 2 words here in the 64-bit case,
  // because the flush instruction operates on doublewords.
  ICache::invalidate_word(addr);
  // The Intel code has this assertion for NativeCall::set_destination,
  // NativeMovConstReg::set_data, NativeMovRegMem::set_offset,
  // NativeJump::set_jump_destination, and NativePushImm32::set_data
  //assert (Patching_lock->owned_by_self(), "must hold lock to patch instruction")
}
 110 
 111 bool NativeInstruction::is_zero_test(Register &reg) {
 112   int x = long_at(0);
 113   Assembler::op3s temp = (Assembler::op3s) (Assembler::sub_op3 | Assembler::cc_bit_op3);
 114   if (is_op3(x, temp, Assembler::arith_op) &&
 115       inv_immed(x) && inv_rd(x) == G0) {
 116       if (inv_rs1(x) == G0) {
 117         reg = inv_rs2(x);
 118         return true;
 119       } else if (inv_rs2(x) == G0) {
 120         reg = inv_rs1(x);
 121         return true;
 122       }
 123   }
 124   return false;
 125 }
 126 
 127 bool NativeInstruction::is_load_store_with_small_offset(Register reg) {
 128   int x = long_at(0);
 129   if (is_op(x, Assembler::ldst_op) &&
 130       inv_rs1(x) == reg && inv_immed(x)) {
 131     return true;
 132   }
 133   return false;
 134 }
 135 
 136 void NativeCall::verify() {
 137   NativeInstruction::verify();
 138   // make sure code pattern is actually a call instruction
 139   if (!is_op(long_at(0), Assembler::call_op)) {
 140     fatal("not a call");
 141   }
 142 }
 143 
// Print this call instruction's address and its resolved destination.
void NativeCall::print() {
  tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
}
 147 
 148 
 149 // MT-safe patching of a call instruction (and following word).
 150 // First patches the second word, and then atomicly replaces
 151 // the first word with the first new instruction word.
 152 // Other processors might briefly see the old first word
 153 // followed by the new second word.  This is OK if the old
 154 // second word is harmless, and the new second word may be
 155 // harmlessly executed in the delay slot of the call.
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
   assert (instr_addr != NULL, "illegal address for code patching");
   NativeCall* n_call =  nativeCall_at (instr_addr); // checking that it is a call
   assert(NativeCall::instruction_size == 8, "wrong instruction size; must be 8");
   // The replacement is exactly two instruction words taken from code_buffer.
   int i0 = ((int*)code_buffer)[0];
   int i1 = ((int*)code_buffer)[1];
   // The second word is the one another thread might observe in a mixed state.
   int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
   // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   // (second word first, then first word — see the MT-safety comment above).
   n_call->set_long_at(1*BytesPerInstWord, i1);
   n_call->set_long_at(0*BytesPerInstWord, i0);
   // NOTE:  It is possible that another thread T will execute
   // only the second patched word.
   // In other words, since the original instruction is this
   //    call patching_stub; nop                   (NativeCall)
   // and the new sequence from the buffer is this:
   //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
   // what T will execute is this:
   //    call patching_stub; add %r, %lo(K), %r
   // thereby putting garbage into %r before calling the patching stub.
   // This is OK, because the patching stub ignores the value of %r.

   // Make sure the first-patched instruction, which may co-exist
   // briefly with the call, will do something harmless.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
}
 188 
 189 // Similar to replace_mt_safe, but just changes the destination.  The
 190 // important thing is that free-running threads are able to execute this
 191 // call instruction at all times.  Thus, the displacement field must be
 192 // instruction-word-aligned.  This is always true on SPARC.
 193 //
 194 // Used in the runtime linkage of calls; see class CompiledIC.
// Retarget this call while other threads may be executing it. Safe because
// the 30-bit call displacement is a single, word-aligned instruction word on
// SPARC, so the store below is atomic with respect to instruction fetch.
void NativeCall::set_destination_mt_safe(address dest) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  // set_destination uses set_long_at which does the ICache::invalidate
  set_destination(dest);
}
 201 
 202 // Code for unit testing implementation of NativeCall class
// Debug-only self-test: emit a call, then exercise set_destination/destination
// round-trips over a range of displacements.
void NativeCall::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeCall  *nc;
  uint idx;
  // Displacements exercising zero, negative, and boundary values.
  int offsets[] = {
    0x0,
    0xfffffff0,
    0x7ffffff0,
    0x80000000,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  a->call( a->pc(), relocInfo::none );
  a->delayed()->nop();
  nc = nativeCall_at( cb.insts_begin() );
  nc->print();

  nc = nativeCall_overwriting_at( nc->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nc->set_destination( cb.insts_begin() + offsets[idx] );
    assert(nc->destination() == (cb.insts_begin() + offsets[idx]), "check unit test");
    nc->print();
  }

  nc = nativeCall_before( cb.insts_begin() + 8 );
  nc->print();

  VM_Version::revert();
#endif
}
 239 // End code for unit testing implementation of NativeCall class
 240 
 241 //-------------------------------------------------------------------
 242 
 243 #ifdef _LP64
 244 
// Intentionally a no-op: a far call materializes its full 64-bit destination
// in the instruction stream, so it is never retargeted in place. The disabled
// code below sketches what an in-place retarget would look like.
void NativeFarCall::set_destination(address dest) {
  // Address materialized in the instruction stream, so nothing to do.
  return;
#if 0 // What we'd do if we really did want to change the destination
  if (destination() == dest) {
    return;
  }
  ResourceMark rm;
  CodeBuffer buf(addr_at(0), instruction_size + 1);
  MacroAssembler* _masm = new MacroAssembler(&buf);
  // Generate the new sequence
  AddressLiteral(dest);
  _masm->jumpl_to(dest, O7, O7);
  ICache::invalidate_range(addr_at(0), instruction_size );
#endif
}
 261 
// A far call shares its instruction pattern with NativeJump (jumpl_to),
// so delegate the pattern check after confirming the layout assumptions.
void NativeFarCall::verify() {
  // make sure code pattern is actually a jumpl_to instruction
  assert((int)instruction_size == (int)NativeJump::instruction_size, "same as jump_to");
  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
  nativeJump_at(addr_at(0))->verify();
}
 268 
 269 bool NativeFarCall::is_call_at(address instr) {
 270   return nativeInstruction_at(instr)->is_sethi();
 271 }
 272 
// Print this far call's address and its resolved destination.
void NativeFarCall::print() {
  tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
}
 276 
 277 bool NativeFarCall::destination_is_compiled_verified_entry_point() {
 278   nmethod* callee = CodeCache::find_nmethod(destination());
 279   if (callee == NULL) {
 280     return false;
 281   } else {
 282     return destination() == callee->verified_entry_point();
 283   }
 284 }
 285 
 286 // MT-safe patching of a far call.
// MT-safe patching of a far call.
// Not implemented for the multi-word far-call sequence; aborts if reached.
void NativeFarCall::replace_mt_safe(address instr_addr, address code_buffer) {
  Unimplemented();
}
 290 
 291 // Code for unit testing implementation of NativeFarCall class
// Code for unit testing implementation of NativeFarCall class.
// No self-test exists for far calls; aborts if reached.
void NativeFarCall::test() {
  Unimplemented();
}
 295 // End code for unit testing implementation of NativeFarCall class
 296 
 297 #endif // _LP64
 298 
 299 //-------------------------------------------------------------------
 300 
 301 
 302 void NativeMovConstReg::verify() {
 303   NativeInstruction::verify();
 304   // make sure code pattern is actually a "set_metadata" synthetic instruction
 305   // see MacroAssembler::set_oop()
 306   int i0 = long_at(sethi_offset);
 307   int i1 = long_at(add_offset);
 308 
 309   // verify the pattern "sethi %hi22(imm), reg ;  add reg, %lo10(imm), reg"
 310   Register rd = inv_rd(i0);
 311 #ifndef _LP64
 312   if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
 313         is_op3(i1, Assembler::add_op3, Assembler::arith_op) &&
 314         inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
 315         rd == inv_rs1(i1) && rd == inv_rd(i1))) {
 316     fatal("not a set_metadata");
 317   }
 318 #else
 319   if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
 320     fatal("not a set_metadata");
 321   }
 322 #endif
 323 }
 324 
 325 
// Print this set-constant site's address and the constant it materializes.
void NativeMovConstReg::print() {
  tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
}
 329 
 330 
#ifdef _LP64
// Reassemble the 64-bit constant from the multi-instruction sethi sequence
// plus the simm13 of the trailing add.
intptr_t NativeMovConstReg::data() const {
  return data64(addr_at(sethi_offset), long_at(add_offset));
}
#else
// Reassemble the 32-bit constant from the sethi immediate (high 22 bits)
// and the add's simm13 (low 10 bits).
intptr_t NativeMovConstReg::data() const {
  return data32(long_at(sethi_offset), long_at(add_offset));
}
#endif
 340 
 341 
// Patch the constant materialized by this sethi/add sequence to x, and keep
// any oop/metadata relocation cell covering this site in sync so the GC sees
// the same value the code uses.
void NativeMovConstReg::set_data(intptr_t x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi(  long_at(sethi_offset), x));
#endif
  set_long_at(add_offset,   set_data32_simm13( long_at(add_offset),   x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    Metadata** metadata_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          // First oop reloc seen: update its cell with the new value.
          oop_addr = r->oop_addr();
          *oop_addr = cast_to_oop(x);
        } else {
          // Any further oop relocs must share the same cell.
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation *r = iter.metadata_reloc();
        if (metadata_addr == NULL) {
          metadata_addr = r->metadata_addr();
          *metadata_addr = (Metadata*)x;
        } else {
          assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
        }
      }
    }
  }
}
 379 
 380 
 381 // Code for unit testing implementation of NativeMovConstReg class
// Debug-only self-test: emit two sethi/add sequences, then exercise
// set_data/data round-trips over a range of constants.
void NativeMovConstReg::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstReg* nm;
  uint idx;
  // Constants exercising zero, sign boundaries, and simm13-edge values.
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstReg_at( cb.insts_begin() );
  nm->print();

  nm = nativeMovConstReg_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif
}
 421 // End code for unit testing implementation of NativeMovConstReg class
 422 
 423 //-------------------------------------------------------------------
 424 
// Verify the patchable set-constant pattern: sethi / nop / add. The extra
// nop leaves room for a later in-place patch.
void NativeMovConstRegPatching::verify() {
  NativeInstruction::verify();
  // Make sure code pattern is sethi/nop/add.
  int i0 = long_at(sethi_offset);
  int i1 = long_at(nop_offset);
  int i2 = long_at(add_offset);
  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");

  // Verify the pattern "sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg"
  // The casual reader should note that on Sparc a nop is a special case of sethi
  // in which the destination register is %g0.
  Register rd0 = inv_rd(i0);
  Register rd1 = inv_rd(i1);
  if (!(is_op2(i0, Assembler::sethi_op2) && rd0 != G0 &&
        is_op2(i1, Assembler::sethi_op2) && rd1 == G0 &&        // nop is a special case of sethi
        is_op3(i2, Assembler::add_op3, Assembler::arith_op) &&
        inv_immed(i2) && (unsigned)get_simm13(i2) < (1 << 10) &&
        rd0 == inv_rs1(i2) && rd0 == inv_rd(i2))) {
    fatal("not a set_metadata");
  }
}
 446 
 447 
// Print this patchable set-constant site's address and its current constant.
void NativeMovConstRegPatching::print() {
  tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
}
 451 
 452 
// Reassemble the constant from the sethi immediate and the add's simm13
// (64-bit multi-instruction decode on LP64, 22+10-bit decode on 32-bit).
int NativeMovConstRegPatching::data() const {
#ifdef _LP64
  return data64(addr_at(sethi_offset), long_at(add_offset));
#else
  return data32(long_at(sethi_offset), long_at(add_offset));
#endif
}
 460 
 461 
// Patch the constant materialized by this sethi/nop/add sequence to x, and
// keep any oop/metadata relocation cell covering this site in sync.
// Mirrors NativeMovConstReg::set_data.
void NativeMovConstRegPatching::set_data(int x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi(long_at(sethi_offset), x));
#endif
  set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    Metadata** metadata_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          // First oop reloc seen: update its cell with the new value.
          oop_addr = r->oop_addr();
          *oop_addr = cast_to_oop(x);
        } else {
          // Any further oop relocs must share the same cell.
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation *r = iter.metadata_reloc();
        if (metadata_addr == NULL) {
          metadata_addr = r->metadata_addr();
          *metadata_addr = (Metadata*)x;
        } else {
          assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
        }
      }
    }
  }
}
 499 
 500 
 501 // Code for unit testing implementation of NativeMovConstRegPatching class
// Debug-only self-test: emit two sethi/nop/add sequences, then exercise
// set_data/data round-trips over a range of constants.
void NativeMovConstRegPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstRegPatching* nm;
  uint idx;
  // Constants exercising zero, sign boundaries, and simm13-edge values.
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->nop();
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->nop();
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstRegPatching_at( cb.insts_begin() );
  nm->print();

  nm = nativeMovConstRegPatching_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif // ASSERT
}
 543 // End code for unit testing implementation of NativeMovConstRegPatching class
 544 
 545 
 546 //-------------------------------------------------------------------
 547 
 548 
 549 void NativeMovRegMem::copy_instruction_to(address new_instruction_address) {
 550   Untested("copy_instruction_to");
 551   int instruction_size = next_instruction_address() - instruction_address();
 552   for (int i = 0; i < instruction_size; i += BytesPerInstWord) {
 553     *(int*)(new_instruction_address + i) = *(int*)(address(this) + i);
 554   }
 555 }
 556 
 557 
// Verify that this site is a load or store of some sort: either the
// immediate form (single ld*/st* with simm13 offset) or the register form
// (sethi/add to build the offset, then ld*/st* at ldst_offset).
void NativeMovRegMem::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "ld" or "st" of some sort.
  int i0 = long_at(0);
  int op3 = inv_op3(i0);

  assert((int)add_offset == NativeMovConstReg::add_offset, "sethi size ok");

  // Integer and float ld/st opcodes live in disjoint op3 ranges, each
  // checked against its own bitmask.
  if (!(is_op(i0, Assembler::ldst_op) &&
        inv_immed(i0) &&
        0 != (op3 < op3_ldst_int_limit
         ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
         : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))))
  {
    // Not the immediate form; check the register form's trailing ld*/st*.
    int i1 = long_at(ldst_offset);
    Register rd = inv_rd(i0);

    op3 = inv_op3(i1);
    // NOTE(review): the negation here only wraps is_op(i1, ...), unlike the
    // immediate-form check above which negates the whole conjunction —
    // looks like the "!" was intended to cover the full condition; confirm.
    if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
         0 != (op3 < op3_ldst_int_limit
              ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
               : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) {
      fatal("not a ld* or st* op");
    }
  }
}
 584 
 585 
// Print this load/store site, showing the immediate offset when present.
void NativeMovRegMem::print() {
  if (is_immediate()) {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
  } else {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
  }
}
 593 
 594 
 595 // Code for unit testing implementation of NativeMovRegMem class
// Debug-only self-test: emit immediate-form and register-form (sethi/add)
// variants of each load/store flavor, then exercise set_offset/offset
// round-trips at each site. idx counts the emitted NativeMovRegMem sites.
void NativeMovRegMem::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMem* nm;
  uint idx = 0;
  uint idx1;
  // Offsets exercising zero, sign boundaries, and simm13-edge values.
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xffffffff, relocInfo::external_word_type);
  AddressLiteral al2(0xaaaabbbb, relocInfo::external_word_type);
  a->ldsw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldx( G5, I3, G4 ); idx++;
  a->ldd( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldd( G5, I3, G4 ); idx++;
  a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  a->stw( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  nm = nativeMovRegMem_at( cb.insts_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  // Walk the remaining sites; immediate forms only take low10 offsets.
  while (--idx) {
    nm = nativeMovRegMem_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
 689 
 690 // End code for unit testing implementation of NativeMovRegMem class
 691 
 692 //--------------------------------------------------------------------------------
 693 
 694 
// Copy this load/store instruction sequence to new_instruction_address.
// NOTE(review): this copies in units of "long"/wordSize, while the
// non-patching NativeMovRegMem variant copies int/BytesPerInstWord —
// presumably instruction sizes here are word-multiples; confirm.
void NativeMovRegMemPatching::copy_instruction_to(address new_instruction_address) {
  Untested("copy_instruction_to");
  int instruction_size = next_instruction_address() - instruction_address();
  for (int i = 0; i < instruction_size; i += wordSize) {
    *(long*)(new_instruction_address + i) = *(long*)(address(this) + i);
  }
}
 702 
 703 
// Verify that this patchable site is a load or store of some sort: either
// the immediate form or the register form (sethi/nop/add plus ld*/st*).
// Mirrors NativeMovRegMem::verify.
void NativeMovRegMemPatching::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "ld" or "st" of some sort.
  int i0 = long_at(0);
  int op3 = inv_op3(i0);

  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");

  // Integer and float ld/st opcodes live in disjoint op3 ranges, each
  // checked against its own bitmask.
  if (!(is_op(i0, Assembler::ldst_op) &&
        inv_immed(i0) &&
        0 != (op3 < op3_ldst_int_limit
         ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
         : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) {
    // Not the immediate form; check the register form's trailing ld*/st*.
    int i1 = long_at(ldst_offset);
    Register rd = inv_rd(i0);

    op3 = inv_op3(i1);
    // NOTE(review): the "!" only wraps is_op(i1, ...) here, unlike the
    // whole-conjunction negation above — likely intended to cover the
    // full condition; confirm before relying on this check.
    if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
         0 != (op3 < op3_ldst_int_limit
              ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
              : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) {
      fatal("not a ld* or st* op");
    }
  }
}
 729 
 730 
// Print this patchable load/store site, showing the immediate offset when present.
void NativeMovRegMemPatching::print() {
  if (is_immediate()) {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
  } else {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
  }
}
 738 
 739 
 740 // Code for unit testing implementation of NativeMovRegMemPatching class
// Debug-only self-test: emit immediate-form and patchable register-form
// (sethi/nop/add) variants of each load/store flavor, then exercise
// set_offset/offset round-trips at each site. idx counts the emitted sites.
void NativeMovRegMemPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMemPatching* nm;
  uint idx = 0;
  uint idx1;
  // Offsets exercising zero, sign boundaries, and simm13-edge values.
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al(0xffffffff, relocInfo::external_word_type);
  a->ldsw( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx(  G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldx(  G5, I3, G4 ); idx++;
  a->ldd(  G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldd(  G5, I3, G4 ); idx++;
  a->ldf(  FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldf(  FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  a->stw( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  nm = nativeMovRegMemPatching_at( cb.insts_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  // Walk the remaining sites; immediate forms only take low10 offsets.
  while (--idx) {
    nm = nativeMovRegMemPatching_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
 833 // End code for unit testing implementation of NativeMovRegMemPatching class
 834 
 835 
 836 //--------------------------------------------------------------------------------
 837 
 838 
// Checks that this instruction sequence looks like a valid jump_to;
// calls fatal() if the expected pattern is not found.
void NativeJump::verify() {
  NativeInstruction::verify();
  // First two words of the expected "sethi; jmpl" sequence.
  int i0 = long_at(sethi_offset);
  int i1 = long_at(jmpl_offset);
  // The sethi part must have the same size as NativeMovConstReg's.
  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
  // verify the pattern "sethi %hi22(imm), treg ;  jmpl treg, %lo10(imm), lreg"
  Register rd = inv_rd(i0);
#ifndef _LP64
  // Full check: sethi into a non-G0 register, then a jmpl (or, with
  // TraceJumps, an add) whose 10-bit immediate and source register match
  // the sethi destination.
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
        (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op) ||
        (TraceJumps && is_op3(i1, Assembler::add_op3, Assembler::arith_op))) &&
        inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
        rd == inv_rs1(i1))) {
    fatal("not a jump_to instruction");
  }
#else
  // In LP64, the jump instruction location varies for non-relocatable
  // jumps, for example it could be sethi, xor, jmp instead of the
  // 7 instructions for sethi.  So let's check sethi only.
  // NOTE(review): by operator precedence this reads (!is_op2(...)) && (rd != G0),
  // so a non-sethi word with rd == G0 is NOT reported; the comment suggests the
  // intent may have been !(is_op2(...) && rd != G0) — confirm before changing.
  if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
    fatal("not a jump_to instruction");
  }
#endif
}
 863 
 864 
 865 void NativeJump::print() {
 866   tty->print_cr(INTPTR_FORMAT ": jmpl reg, " INTPTR_FORMAT, instruction_address(), jump_destination());
 867 }
 868 
 869 
 870 // Code for unit testing implementation of NativeJump class
// Unit test (debug builds only): assembles two jump sequences, then patches
// the second one to a range of destinations and checks each reads back.
void NativeJump::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeJump* nj;
  uint idx;
  // Destination offsets to exercise, including 32-bit boundary values.
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  // Presumably relaxes CPU-feature checks so any instruction can be
  // assembled; paired with VM_Version::revert() below — confirm in VM_Version.
  VM_Version::allow_all();

  // Two "sethi; jmpl; delayed nop" sequences, differing only in the jmpl
  // link register (G0 vs. L3).
  AddressLiteral al(0x7fffbbbb, relocInfo::external_word_type);
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), G0, RelocationHolder::none);
  a->delayed()->nop();
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), L3, RelocationHolder::none);
  a->delayed()->nop();

  nj = nativeJump_at( cb.insts_begin() );
  nj->print();

  // Patch the second jump to each offset and verify the round trip.
  nj = nativeJump_at( nj->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nj->set_jump_destination( nj->instruction_address() + offsets[idx] );
    assert(nj->jump_destination() == (nj->instruction_address() + offsets[idx]), "check unit test");
    nj->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
 912 // End code for unit testing implementation of NativeJump class
 913 
 914 
// Not implemented on this platform; any call reports a guaranteed VM error.
void NativeJump::insert(address code_pos, address entry) {
  Unimplemented();
}
 918 
 919 // MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
 920 // The problem: jump_to <dest> is a 3-word instruction (including its delay slot).
// An atomic write can only be done on a single word.
// Patches the verified entry point so that execution of the stale code is
// diverted; only a single word is written (the only atomic unit available).
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // Here's one way to do it:  Pre-allocate a three-word jump sequence somewhere
  // in the header of the nmethod, within a short branch's span of the patch point.
  // Set up the jump sequence using NativeJump::insert, and then use an annulled
  // unconditional branch at the target site (an atomic 1-word update).
  // Limitations:  You can only patch nmethods, with any given nmethod patched at
  // most once, and the patch must be in the nmethod's header.
  // It's messy, but you can ask the CodeCache for the nmethod containing the
  // target address.

  // %%%%% For now, do something MT-stupid:
  ResourceMark rm;
  int code_size = 1 * BytesPerInstWord;
  CodeBuffer cb(verified_entry, code_size + 1);
  MacroAssembler* a = new MacroAssembler(&cb);
  // Overwrite the first word with a load from [G0 + 0] (address zero); the
  // signal handler must recognize this exact instruction (note below).
  a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
  ICache::invalidate_range(verified_entry, code_size);
}
 940 
 941 
 942 void NativeIllegalInstruction::insert(address code_pos) {
 943   NativeIllegalInstruction* nii = (NativeIllegalInstruction*) nativeInstruction_at(code_pos);
 944   nii->set_long_at(0, illegal_instruction());
 945 }
 946 
// Cached bit pattern of the "illegal instruction" trap; 0 until
// NativeInstruction::illegal_instruction() computes it lazily.
static int illegal_instruction_bits = 0;
 948 
 949 int NativeInstruction::illegal_instruction() {
 950   if (illegal_instruction_bits == 0) {
 951     ResourceMark rm;
 952     char buf[40];
 953     CodeBuffer cbuf((address)&buf[0], 20);
 954     MacroAssembler* a = new MacroAssembler(&cbuf);
 955     address ia = a->pc();
 956     a->trap(ST_RESERVED_FOR_USER_0 + 1);
 957     int bits = *(int*)ia;
 958     assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
 959     illegal_instruction_bits = bits;
 960     assert(illegal_instruction_bits != 0, "oops");
 961   }
 962   return illegal_instruction_bits;
 963 }
 964 
// Cached bit pattern of the inline-cache-miss trap; 0 until
// NativeInstruction::is_ic_miss_trap() computes it lazily.
static int ic_miss_trap_bits = 0;
 966 
 967 bool NativeInstruction::is_ic_miss_trap() {
 968   if (ic_miss_trap_bits == 0) {
 969     ResourceMark rm;
 970     char buf[40];
 971     CodeBuffer cbuf((address)&buf[0], 20);
 972     MacroAssembler* a = new MacroAssembler(&cbuf);
 973     address ia = a->pc();
 974     a->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
 975     int bits = *(int*)ia;
 976     assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
 977     ic_miss_trap_bits = bits;
 978     assert(ic_miss_trap_bits != 0, "oops");
 979   }
 980   return long_at(0) == ic_miss_trap_bits;
 981 }
 982 
 983 
 984 bool NativeInstruction::is_illegal() {
 985   if (illegal_instruction_bits == 0) {
 986     return false;
 987   }
 988   return long_at(0) == illegal_instruction_bits;
 989 }
 990 
 991 
 992 void NativeGeneralJump::verify() {
 993   assert(((NativeInstruction *)this)->is_jump() ||
 994          ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
 995 }
 996 
 997 
 998 void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
 999   Assembler::Condition condition = Assembler::always;
1000   int x = Assembler::op2(Assembler::br_op2) | Assembler::annul(false) |
1001     Assembler::cond(condition) | Assembler::wdisp((intptr_t)entry, (intptr_t)code_pos, 22);
1002   NativeGeneralJump* ni = (NativeGeneralJump*) nativeInstruction_at(code_pos);
1003   ni->set_long_at(0, x);
1004 }
1005 
1006 
// MT-safe patching of a jmp instruction (and following word).
// First patches the second word, and then atomically replaces
// the first word with the first new instruction word.
// Other processors might briefly see the old first word
// followed by the new second word.  This is OK if the old
// second word is harmless, and the new second word may be
// harmlessly executed in the delay slot of the call.
// instr_addr:  address of the existing two-word jump being replaced
// code_buffer: holds the two replacement instruction words
// The second word is written first; the order is the MT-safety mechanism.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
   assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
   assert (instr_addr != NULL, "illegal address for code patching");
   NativeGeneralJump* h_jump =  nativeGeneralJump_at (instr_addr); // checking that it is a call
   assert(NativeGeneralJump::instruction_size == 8, "wrong instruction size; must be 8");
   int i0 = ((int*)code_buffer)[0];
   int i1 = ((int*)code_buffer)[1];
   // Word 1 is the "contention" word: the one that another thread may see
   // paired with either the old or the new word 0.
   int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
   // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   h_jump->set_long_at(1*BytesPerInstWord, i1);
   h_jump->set_long_at(0*BytesPerInstWord, i0);
   // NOTE:  It is possible that another thread T will execute
   // only the second patched word.
   // In other words, since the original instruction is this
   //    jmp patching_stub; nop                    (NativeGeneralJump)
   // and the new sequence from the buffer is this:
   //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
   // what T will execute is this:
   //    jmp patching_stub; add %r, %lo(K), %r
   // thereby putting garbage into %r before calling the patching stub.
   // This is OK, because the patching stub ignores the value of %r.

   // Make sure the first-patched instruction, which may co-exist
   // briefly with the call, will do something harmless.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
}