#ifdef USE_PRAGMA_IDENT_SRC
#pragma ident "@(#)c1_CodeStubs_x86.cpp 1.101 07/09/17 09:25:57 JVM"
#endif
/*
 * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_c1_CodeStubs_x86.cpp.incl"


#define __ ce->masm()->

float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;
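
// The in-line fast path for f2i/d2i truncates with cvttss2si / cvttsd2si
// (SSE case), which produce the "integer indefinite" value 0x80000000 when
// the input is NaN or does not fit in an int; the fast path branches here
// when it sees that value. Roughly (a sketch, not the exact emitted code):
//
//   cvttss2si reg, xmm         ; truncating convert
//   cmpl      reg, 0x80000000  ; the indefinite value?
//   je        stub             ; -> ConversionStub figures out which case
//
// The stub maps NaN -> 0 and positive overflow -> maxInt, and leaves
// 0x80000000 (minInt) in place for negative overflow.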
void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");

  // compare the input against 0.0 so the flags tell us which case applies
  if (input()->is_single_xmm()) {
    __ comiss(input()->as_xmm_float_reg(),
              ExternalAddress((address)&float_zero));
  } else if (input()->is_double_xmm()) {
    __ comisd(input()->as_xmm_double_reg(),
              ExternalAddress((address)&double_zero));
  } else {
    __ pushl(rax);     // fnstsw_ax clobbers rax
    __ ftst();         // compare ST(0) against 0.0
    __ fnstsw_ax();    // FPU status word -> ax
    __ sahf();         // ah -> eflags (C0 -> CF, C2 -> PF, C3 -> ZF)
    __ popl(rax);
  }

  Label NaN, do_return;
  __ jccb(Assembler::parity, NaN);       // unordered -> the input is NaN
  __ jccb(Assembler::below, do_return);  // input < 0.0 -> keep minInt in the result

  // input is > 0 -> return maxInt
  // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
  __ decrement(result()->as_register());
  __ jmpb(do_return);

  // input is NaN -> return 0
  __ bind(NaN);
  __ xorl(result()->as_register(), result()->as_register());

  __ bind(do_return);
  __ jmp(_continuation);
}

#ifdef TIERED
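// Compiled code branches here when a method's invocation or backedge
// counter overflows; the bci is passed so the runtime can decide whether
// to recompile the method at a higher tier before execution resumes at
// _continuation.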
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_bci, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ jmp(_continuation);
}
#endif // TIERED


RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  _info = info == NULL ? NULL : new CodeEmitInfo(info);
}


void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // pass the array index on the stack because all registers must be preserved
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    // the division at _offset traps rather than being preceded by an
    // explicit zero check, so record it in the implicit exception table
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ movl(rdx, _klass_reg->as_register());
  __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(),  1);
  ce->store_parameter(_lock_reg->as_register(), 0);
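  // pick the runtime entry point: the _nofpu variants skip saving and
  // restoring FPU state across the call, which is safe when the compiled
  // method contains no FPU code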
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}


// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, re-execute instruction
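//
// A rough sketch of what PatchingStub::emit_code() below produces (the
// exact bytes vary with the patch kind):
//
//   main code:    jmp <patch stub>              ; NativeGeneralJump written over
//                                               ; the original instruction
//   patch stub:   <copy of the original bytes>  ; template; re-entered directly by
//                                               ; the initializing thread (load_klass)
//                 <being_initialized check>     ; load_klass_id only
//                 movl eax, imm32               ; never executed; the imm32 bytes
//                                               ; hold the 3-byte patch record
//   call_patch:   call <Runtime1 patching stub>
//                 jmp  <_patch_site_entry>      ; re-execute the now-patched code
//                 nops                          ; room for deopt to plant a call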

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on Intel and we need to make sure
  // that we don't see a piece of the instruction.  It appears mostly
  // impossible on Intel to simply invalidate other processors' caches, and
  // since they may do aggressive prefetching it's very hard to guess what
  // code might be in the icache.  Force the instruction to be double word
  // aligned so that it doesn't span a cache line.
  masm->align(round_to(NativeGeneralJump::instruction_size, wordSize));
}
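
// Note: on IA32 (wordSize == 4, NativeGeneralJump::instruction_size == 5)
// round_to yields an 8-byte alignment. Cache line sizes are multiples of
// 8 bytes, so a 5-7 byte instruction starting on an 8-byte boundary cannot
// straddle a line boundary, which is what makes the concurrent patch safe.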

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // static field accesses have special semantics while the class
  // initializer is being run, so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load-klass instruction for use by the being-initialized case
    address start = __ pc();
    jobject o = NULL;
    __ movoop(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ a_byte(a_byte);
      *ptr = 0x90; // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_klass_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
    Register tmp = rax;
    if (_obj == tmp) tmp = rbx;
    __ pushl(tmp);
    __ get_thread(tmp);
    __ cmpl(tmp, Address(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc)));
    __ popl(tmp);
    __ jcc(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes.
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

  __ a_byte(0xB8);                            // opcode of a movl eax, imm32 (data only, never executed)
  __ a_byte(0);                               // padding byte of the fake imm32
  __ a_byte(being_initialized_entry_offset);  // the three real patch-record bytes
  __ a_byte(bytes_to_skip);
  __ a_byte(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  // overwrite the original code at _pc_start with a jump into this stub
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world.
  for (int j = __ offset(); j < jmp_off + 5; j++) {
    __ nop();
  }
  if (_id == load_klass_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, relocInfo::oop_type, relocInfo::none);
  }
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on the stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


ArrayStoreExceptionStub::ArrayStoreExceptionStub(CodeEmitInfo* info):
  _info(info) {
}


void ArrayStoreExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_array_store_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go.
  // This should really convert the IntrinsicID to the methodOop and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5, true);
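
  // args[i] now describes, for each of the five arguments, whether the Java
  // calling convention assigns it a register or an outgoing stack slot (the
  // trailing 'true' selects the outgoing, i.e. caller-side, layout)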

  // push parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // arguments that the convention assigns to stack slots are stored below;
  // register arguments must already be in the right place
  for (int i = 0; i < 5; i++) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ movl (Address(rsp, st_off), r[i]);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

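  // a static call needs its companion static-call stub in the nmethod: the
  // first execution goes through the resolver, which then patches the call
  // below to the real arraycopy entry point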
  ce->emit_static_call_stub();
  AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
                         relocInfo::static_call_type);
  __ call(resolve);
  ce->add_call_info_here(info());

#ifndef PRODUCT
  __ increment(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
#endif

  __ jmp(_continuation);
}


#undef __