/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_aarch64.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#endif


#define __ ce->masm()->
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
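  // Pass the method (as a Metadata*) and the bci to the counter_overflow
  // runtime stub through stack parameter slots.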
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  __ mov_metadata(rscratch1, m);
  ce->store_parameter(rscratch1, 1);
  ce->store_parameter(_bci, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ far_call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

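  // Pass the failing index to the runtime stub in rscratch1; it is either
  // copied from its register or materialized as a constant.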
  if (_index->is_cpu_register()) {
    __ mov(rscratch1, _index->as_register());
  } else {
    __ mov(rscratch1, _index->as_jint());
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(stub_id)), NULL, rscratch2);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ far_call(Address(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}



// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id   = stub_id;
}



void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
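  // The allocation runtime stubs take the klass in r3 and return the new
  // object in r0 (checked by the assert below).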
  __ mov(r3, _klass_reg->as_register());
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == r19, "length must be in r19");
  assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == r19, "length must be in r19");
  assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(),  1);
  ce->store_parameter(_lock_reg->as_register(), 0);
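  // The _nofpu runtime stub variants skip saving and restoring FPU registers,
  // which is safe when the compiled method contains no FPU code.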
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
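  // Tail-call the monitorexit stub: set the return address to _continuation
  // so the stub returns straight there instead of back into this stub.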
  __ adr(lr, _continuation);
  __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
}


// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(false, "AArch64 should not use C1 runtime patching");
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
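  // _trap_request encodes the deoptimization reason and action; it is passed
  // to the deoptimize runtime stub in a stack parameter slot.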
  ce->store_parameter(_trap_request, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

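  // Record the offset of the faulting instruction against this stub's offset
  // so the implicit null check dispatches to the code below.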
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object in a scratch register because all other registers
  // must be preserved
  if (_obj->is_cpu_register()) {
    __ mov(rscratch1, _obj->as_register());
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub)), NULL, rscratch2);
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT};
  SharedRuntime::java_calling_convention(signature, args, 5, true);
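  // java_calling_convention has assigned a location to each argument:
  // register arguments are expected to be in place already (asserted below),
  // stack arguments are stored at their sp-relative offsets in the loop.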

  // push parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // next registers will get stored on the stack
  for (int i = 0; i < 5 ; i++ ) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ str (r[i], Address(sp, st_off));
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  Address resolve(SharedRuntime::get_resolve_static_call_stub(),
                  relocInfo::static_call_type);
  address call = __ trampoline_call(resolve);
  if (call == NULL) {
    ce->bailout("trampoline stub overflow");
    return;
  }
  ce->add_call_info_here(info());

#ifndef PRODUCT
  __ lea(rscratch2, ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
  __ incrementw(Address(rscratch2));
#endif

  __ b(_continuation);
}


/////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(_entry);
  assert(pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = pre_val()->as_register();

  if (do_load()) {
    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
  }
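  // A null previous value never needs to be marked, so the runtime call
  // can be skipped.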
  __ cbz(pre_val_reg, _continuation);
  ce->store_parameter(pre_val()->as_register(), 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
  __ b(_continuation);
}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register new_val_reg = new_val()->as_register();
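  // Storing a null never creates a pointer into another region, so no card
  // needs to be dirtied and the runtime call can be skipped.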
  __ cbz(new_val_reg, _continuation);
  ce->store_parameter(addr()->as_pointer_register(), 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
  __ b(_continuation);
}

#endif // INCLUDE_ALL_GCS
/////////////////////////////////////////////////////////////////////////////

#undef __