/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_sparc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_sparc.inline.hpp"

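// Shorthand used throughout this file: "__" expands to the LIR_Assembler's
// MacroAssembler, so "__ foo()" emits foo() into the code buffer.  SPARC
// branches and calls have a delay slot; "__ delayed()->..." supplies the
// instruction for that slot (a nop when nothing useful can be hoisted into it).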
#define __ ce->masm()->

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}


void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ call(a, relocInfo::runtime_call_type);
    __ delayed()->nop();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

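  // The runtime stubs called below expect the failing index in G4, whether
  // it lives in a register or is a constant.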
  if (_index->is_register()) {
    __ mov(_index->as_register(), G4);
  } else {
    __ set(_index->as_jint(), G4);
  }
  if (_throw_index_out_of_bounds_exception) {
    __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
  }
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ call(a, relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
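  // counter_overflow_id expects the bci in G4 and the Method* in G5.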
  __ set(_bci, G4);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  __ set_metadata_constant(m, G5);
  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}


void DivByZeroStub::emit_code(LIR_Assembler* ce) {
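  // If this stub has an associated faulting instruction (offset != -1),
  // record it in the implicit exception table so execution is dispatched
  // to this stub.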
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(a, relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of SimpleExceptionStub
// Note: %g1 and %g3 are already in use
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);

  if (_obj->is_valid()) {
    __ delayed()->mov(_obj->as_register(), G4); // _obj contains the optional argument to the stub
  } else {
    __ delayed()->mov(G0, G4);
  }
  ce->add_call_info_here(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id   = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
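  // The allocation stub expects the klass in G5 (loaded from the delay slot
  // below) and returns the newly allocated object in O0.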
  __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of NewTypeArrayStub
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  __ mov(_length->as_register(), G4);
  __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  __ mov(_length->as_register(), G4);
  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of MonitorAccessStubs
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
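  // The monitorenter stubs take the object in G4 and the address of the
  // BasicObjectLock in G5 (filled in from the delay slot of the call).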
  __ mov(_obj_reg->as_register(), G4);
  if (ce->compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::monitorenter_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::monitorenter_nofpu_id), relocInfo::runtime_call_type);
  }
  __ delayed()->mov_or_nop(_lock_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
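  // If the fast-path unlock destroyed the lock register, recompute the
  // monitor's address in the frame from its index.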
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  if (ce->compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::monitorexit_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::monitorexit_nofpu_id), relocInfo::runtime_call_type);
  }

  __ delayed()->mov_or_nop(_lock_reg->as_register(), G4);
  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}

// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace the original code with a call to the stub
// At runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

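// Offset (negative) from the end of the call into the patching stub back to
// the patch record; emit_code() asserts below that this relationship still
// holds after the call has been emitted.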
int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* ) {
  // patch sites on sparc are always properly aligned.
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // copy original code here
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");

  Label call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
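    // patchable_set emits a fixed worst-case instruction sequence regardless
    // of the (NULL) value, so the copy produced here should match the code at
    // the patch site byte for byte (checked under ASSERT below).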
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit(NULL, metadata_Relocation::spec(_index));
    __ patchable_set(addrlit, _obj);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // produce a copy of the load mirror instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit(NULL, oop_Relocation::spec(_index));
    __ patchable_set(addrlit, _obj);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8 (a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    // Static field accesses have special semantics while the class
    // initializer is being run, so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ ld_ptr(_obj, java_lang_Class::klass_offset_in_bytes(), G3);
    __ ld_ptr(G3, in_bytes(InstanceKlass::init_thread_offset()), G3);
    __ cmp_and_brx_short(G2_thread, G3, Assembler::notEqual, Assembler::pn, call_patch);

    // load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ br(Assembler::always, false, Assembler::pt, _patch_site_continuation);
    __ delayed()->nop();

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes, but the record has to be
  // instruction-aligned, so emit 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record.  We need to emit a full word, so emit an extra empty byte.
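  //   byte 0: 0 (padding so the record fills a whole word)
  //   byte 1: offset back to the being_initialized entry
  //   byte 2: number of bytes to skip at the patch site
  //   byte 3: number of bytes of original code that were copied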
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(target, relocInfo::runtime_call_type);
  __ delayed()->nop();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ br(Assembler::always, false, Assembler::pt, _patch_site_entry);
  __ delayed()->nop();
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);

    pc = (address)(_pc_start + NativeMovConstReg::add_offset);
    RelocIterator iter2(cs, pc, pc+1);
    relocInfo::change_reloc_info_for_address(&iter2, (address) pc, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
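  // deoptimize_id expects the trap request in G4.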
  __ set(_trap_request, G4);
  __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
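  // Marshal the five arraycopy arguments into the SPARC outgoing argument
  // registers O0..O4.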
  __ mov(src()->as_register(),     O0);
  __ mov(src_pos()->as_register(), O1);
  __ mov(dst()->as_register(),     O2);
  __ mov(dst_pos()->as_register(), O3);
  __ mov(length()->as_register(),  O4);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }

  __ call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());

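  // In non-product builds, bump Runtime1::_arraycopy_slowcase_cnt:
  // load the counter, increment it, and store it back.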
#ifndef PRODUCT
  __ set((intptr_t)&Runtime1::_arraycopy_slowcase_cnt, O0);
  __ ld(O0, 0, O1);
  __ inc(O1);
  __ st(O1, 0, O0);
#endif

  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();
}

#undef __