/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_sparc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_sparc.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#endif // INCLUDE_ALL_GCS

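// Shorthand used throughout the stub emitters: `__ insn(...)` expands to
// `ce->masm()->insn(...)` (the usual HotSpot assembler macro convention).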
#define __ ce->masm()->

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}


void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ call(a, relocInfo::runtime_call_type);
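    // SPARC calls and branches have a delay slot; there is nothing
    // useful to hoist into it here, so fill it with a nop.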
    __ delayed()->nop();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

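  // The exception-throwing runtime stubs called below take their
  // arguments in global registers; pass the failing index in G4.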
  if (_index->is_register()) {
    __ mov(_index->as_register(), G4);
  } else {
    __ set(_index->as_jint(), G4);
  }
  if (_throw_index_out_of_bounds_exception) {
    __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
  }
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ call(a, relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ set(_bci, G4);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  __ set_metadata_constant(m, G5);
  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

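  // Return to the continuation. The annul bit is set, so the delay-slot
  // nop of this always-taken branch never executes; it only fills the slot.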
  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}


void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(a, relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of SimpleExceptionStub
// Note: %g1 and %g3 are already in use
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);

  if (_obj->is_valid()) {
    __ delayed()->mov(_obj->as_register(), G4); // _obj contains the optional argument to the stub
  } else {
    __ delayed()->mov(G0, G4);
  }
  ce->add_call_info_here(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id   = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
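  // mov_or_nop fills the delay slot: it emits a mov when the source and
  // destination registers differ, and a plain nop when they already match.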
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of NewTypeArrayStub
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  __ mov(_length->as_register(), G4);
  __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  __ mov(_length->as_register(), G4);
  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of MonitorAccessStubs
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ mov(_obj_reg->as_register(), G4);
  if (ce->compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::monitorenter_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::monitorenter_nofpu_id), relocInfo::runtime_call_type);
  }
  __ delayed()->mov_or_nop(_lock_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  if (ce->compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::monitorexit_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::monitorexit_nofpu_id), relocInfo::runtime_call_type);
  }

  __ delayed()->mov_or_nop(_lock_reg->as_register(), G4);
  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}

// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

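// The runtime finds the 4-byte patch record at this fixed negative offset
// from the pc recorded for the patching call, i.e. one NativeGeneralJump
// back (see the assert against patch_info_pc in emit_code below).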
int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* ) {
  // Patch sites on SPARC are always properly aligned.
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // copy original code here
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");

  Label call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit(NULL, metadata_Relocation::spec(_index));
    __ patchable_set(addrlit, _obj);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // produce a copy of the load mirror instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit(NULL, oop_Relocation::spec(_index));
    __ patchable_set(addrlit, _obj);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8 (a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    // Static field accesses have special semantics while the class
    // initializer is being run, so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ ld_ptr(_obj, java_lang_Class::klass_offset_in_bytes(), G3);
    __ ld_ptr(G3, in_bytes(InstanceKlass::init_thread_offset()), G3);
    __ cmp_and_brx_short(G2_thread, G3, Assembler::notEqual, Assembler::pn, call_patch);

    // load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ br(Assembler::always, false, Assembler::pt, _patch_site_continuation);
    __ delayed()->nop();

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes, but it has to be
  // aligned as an instruction, so emit 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record.  We need to emit a full word, so emit an extra empty byte
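  // Record layout, one byte each:
  //   byte 0: zero padding
  //   byte 1: distance back to the being_initialized entry
  //   byte 2: stub bytes (extra code plus this record) to skip
  //   byte 3: number of original code bytes copied into the stub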
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(target, relocInfo::runtime_call_type);
  __ delayed()->nop();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ br(Assembler::always, false, Assembler::pt, _patch_site_entry);
  __ delayed()->nop();
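  // For klass/mirror/appendix loads, retag the relocations on the two
  // instructions of the patchable constant load (the one at _pc_start and
  // the one at _pc_start + NativeMovConstReg::add_offset) with the proper
  // relocation type, so the patched constant is visited by GC/relocation.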
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);

    pc = (address)(_pc_start + NativeMovConstReg::add_offset);
    RelocIterator iter2(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter2, (address) pc, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ set(_trap_request, G4);
  __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  __ mov(src()->as_register(),     O0);
  __ mov(src_pos()->as_register(), O1);
  __ mov(dst()->as_register(),     O2);
  __ mov(dst_pos()->as_register(), O3);
  __ mov(length()->as_register(),  O4);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }

  __ call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());

#ifndef PRODUCT
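  // Bump Runtime1::_arraycopy_slowcase_cnt (diagnostic only; the
  // load/increment/store sequence below is not atomic).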
  __ set((intptr_t)&Runtime1::_arraycopy_slowcase_cnt, O0);
  __ ld(O0, 0, O1);
  __ inc(O1);
  __ st(O1, 0, O0);
#endif

  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();
}


///////////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(_entry);

  assert(pre_val()->is_register(), "Precondition.");
  Register pre_val_reg = pre_val()->as_register();

  if (do_load()) {
    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
  }

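  // A null previous value never needs to be recorded by the SATB
  // pre-barrier, so a null pre_val branches straight to the continuation.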
  if (__ is_in_wdisp16_range(_continuation)) {
    __ br_null(pre_val_reg, /*annul*/false, Assembler::pt, _continuation);
  } else {
    __ cmp(pre_val_reg, G0);
    __ brx(Assembler::equal, false, Assembler::pn, _continuation);
  }
  __ delayed()->nop();

  __ call(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id));
  __ delayed()->mov(pre_val_reg, G4);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();
}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register addr_reg = addr()->as_pointer_register();
  Register new_val_reg = new_val()->as_register();

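  // Storing a null can never create a cross-region reference, so a null
  // new_val skips the card mark and branches straight to the continuation.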
  if (__ is_in_wdisp16_range(_continuation)) {
    __ br_null(new_val_reg, /*annul*/false, Assembler::pt, _continuation);
  } else {
    __ cmp(new_val_reg, G0);
    __ brx(Assembler::equal, false, Assembler::pn, _continuation);
  }
  __ delayed()->nop();

  __ call(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id));
  __ delayed()->mov(addr_reg, G4);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();
}

#endif // INCLUDE_ALL_GCS
///////////////////////////////////////////////////////////////////////////////////

#undef __