1 /*
2 * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "interpreter/interpreter.hpp"
29 #include "interpreter/interpreterRuntime.hpp"
30 #include "interpreter/interp_masm.hpp"
31 #include "interpreter/templateTable.hpp"
32 #include "memory/universe.inline.hpp"
33 #include "oops/objArrayKlass.hpp"
34 #include "oops/oop.inline.hpp"
35 #include "prims/methodHandles.hpp"
36 #include "runtime/sharedRuntime.hpp"
37 #include "runtime/stubRoutines.hpp"
38 #include "runtime/synchronizer.hpp"
39
40 #ifdef PRODUCT
41 #define __ _masm->
42 #define BLOCK_COMMENT(str)
43 #define BIND(label) __ bind(label);
44 #else
45 #define __ (PRODUCT_ONLY(false&&)Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm):_masm)->
46 #define BLOCK_COMMENT(str) __ block_comment(str)
47 #define BIND(label) __ bind(label); BLOCK_COMMENT(#label ":")
48 #endif
49
50 // The assumed minimum size of a BranchTableBlock.
51 // The actual size of each block heavily depends on the CPU capabilities and,
52 // of course, on the logic implemented in each block.
53 #ifdef ASSERT
54 #define BTB_MINSIZE 256
55 #else
56 #define BTB_MINSIZE 64
57 #endif
58
59 #ifdef ASSERT
60 // Macro to open a BranchTableBlock (a piece of code that is branched to by a calculated branch).
61 #define BTB_BEGIN(lbl, alignment, name) \
62 __ align_address(alignment); \
63 __ bind(lbl); \
64 { unsigned int b_off = __ offset(); \
65 uintptr_t b_addr = (uintptr_t)__ pc(); \
66 __ z_larl(Z_R0, (int64_t)0); /* Check current address alignment. */ \
67 __ z_slgr(Z_R0, br_tab); /* Current Address must be equal */ \
68 __ z_slgr(Z_R0, flags); /* to calculated branch target. */ \
69 __ z_brc(Assembler::bcondLogZero, 3); /* skip trap if ok. */ \
70 __ z_illtrap(0x55); \
71 guarantee(b_addr%alignment == 0, "bad alignment at begin of block" name);
72
73 // Macro to close a BranchTableBlock (a piece of code that is branched to by a calculated branch).
74 #define BTB_END(lbl, alignment, name) \
75 uintptr_t e_addr = (uintptr_t)__ pc(); \
76 unsigned int e_off = __ offset(); \
77 unsigned int len = e_off-b_off; \
78 if (len > alignment) { \
79 tty->print_cr("%4d of %4d @ " INTPTR_FORMAT ": Block len for %s", \
80 len, alignment, e_addr-len, name); \
81 guarantee(len <= alignment, "block too large"); \
82 } \
83 guarantee(len == e_addr-b_addr, "block len mismatch"); \
84 }
85 #else
86 // Macro to open a BranchTableBlock (a piece of code that is branched to by a calculated branch).
87 #define BTB_BEGIN(lbl, alignment, name) \
88 __ align_address(alignment); \
89 __ bind(lbl); \
90 { unsigned int b_off = __ offset(); \
91 uintptr_t b_addr = (uintptr_t)__ pc(); \
92 guarantee(b_addr%alignment == 0, "bad alignment at begin of block" name);
93
94 // Macro to close a BranchTableBlock (a piece of code that is branched to by a calculated branch).
95 #define BTB_END(lbl, alignment, name) \
96 uintptr_t e_addr = (uintptr_t)__ pc(); \
97 unsigned int e_off = __ offset(); \
98 unsigned int len = e_off-b_off; \
99 if (len > alignment) { \
100 tty->print_cr("%4d of %4d @ " INTPTR_FORMAT ": Block len for %s", \
101 len, alignment, e_addr-len, name); \
102 guarantee(len <= alignment, "block too large"); \
103 } \
104 guarantee(len == e_addr-b_addr, "block len mismatch"); \
105 }
106 #endif // ASSERT
107
108 // Platform-dependent initialization.
109
110 void TemplateTable::pd_initialize() {
111 // No specific initialization.
112 }
113
114 // Address computation: local variables
115
116 static inline Address iaddress(int n) {
117 return Address(Z_locals, Interpreter::local_offset_in_bytes(n));
118 }
119
120 static inline Address laddress(int n) {
121 return iaddress(n + 1);
122 }
123
124 static inline Address faddress(int n) {
125 return iaddress(n);
126 }
127
128 static inline Address daddress(int n) {
129 return laddress(n);
130 }
131
132 static inline Address aaddress(int n) {
133 return iaddress(n);
134 }
135
136 // Pass NULL, if no shift instruction should be emitted.
137 static inline Address iaddress(InterpreterMacroAssembler *masm, Register r) {
138 if (masm) {
139 masm->z_sllg(r, r, LogBytesPerWord); // index2bytes
140 }
141 return Address(Z_locals, r, Interpreter::local_offset_in_bytes(0));
142 }
143
144 // Pass NULL, if no shift instruction should be emitted.
145 static inline Address laddress(InterpreterMacroAssembler *masm, Register r) {
146 if (masm) {
147 masm->z_sllg(r, r, LogBytesPerWord); // index2bytes
148 }
149 return Address(Z_locals, r, Interpreter::local_offset_in_bytes(1) );
150 }
151
152 static inline Address faddress(InterpreterMacroAssembler *masm, Register r) {
153 return iaddress(masm, r);
154 }
155
156 static inline Address daddress(InterpreterMacroAssembler *masm, Register r) {
157 return laddress(masm, r);
158 }
159
160 static inline Address aaddress(InterpreterMacroAssembler *masm, Register r) {
161 return iaddress(masm, r);
162 }
163
164 // At top of Java expression stack which may be different than esp(). It
165 // isn't for category 1 objects.
166 static inline Address at_tos(int slot = 0) {
167 return Address(Z_esp, Interpreter::expr_offset_in_bytes(slot));
168 }
169
170 // Condition conversion
171 static Assembler::branch_condition j_not(TemplateTable::Condition cc) {
172 switch (cc) {
173 case TemplateTable::equal :
174 return Assembler::bcondNotEqual;
175 case TemplateTable::not_equal :
176 return Assembler::bcondEqual;
177 case TemplateTable::less :
178 return Assembler::bcondNotLow;
179 case TemplateTable::less_equal :
180 return Assembler::bcondHigh;
181 case TemplateTable::greater :
182 return Assembler::bcondNotHigh;
183 case TemplateTable::greater_equal:
184 return Assembler::bcondLow;
185 }
186 ShouldNotReachHere();
187 return Assembler::bcondZero;
188 }
189
190 // Do an oop store like *(base + offset) = val
191 // offset can be a register or a constant.
192 static void do_oop_store(InterpreterMacroAssembler* _masm,
193 Register base,
194 RegisterOrConstant offset,
195 Register val,
196 bool val_is_null, // == false does not guarantee that val really is not equal NULL.
197 Register tmp1, // If tmp3 is volatile, either tmp1 or tmp2 must be
198 Register tmp2, // non-volatile to hold a copy of pre_val across runtime calls.
199 Register tmp3, // Ideally, this tmp register is non-volatile, as it is used to
200 // hold pre_val (must survive runtime calls).
201 BarrierSet::Name barrier,
202 bool precise) {
203 BLOCK_COMMENT("do_oop_store {");
204 assert(val != noreg, "val must always be valid, even if it is zero");
205 assert_different_registers(tmp1, tmp2, tmp3, val, base, offset.register_or_noreg());
206 __ verify_oop(val);
207 switch (barrier) {
208 #if INCLUDE_ALL_GCS
209 case BarrierSet::G1SATBCTLogging:
210 {
211 #ifdef ASSERT
212 if (val_is_null) { // Check if the flag setting reflects reality.
213 Label OK;
214 __ z_ltgr(val, val);
215 __ z_bre(OK);
216 __ z_illtrap(0x11);
217 __ bind(OK);
218 }
219 #endif
220 Register pre_val = tmp3;
221 // Load and record the previous value.
222 __ g1_write_barrier_pre(base, offset, pre_val, val,
223 tmp1, tmp2,
224 false); // Needs to hold pre_val in non_volatile register?
225
226 if (val_is_null) {
227 __ store_heap_oop_null(val, offset, base);
228 } else {
229 Label Done;
230 // val_is_null == false does not guarantee that val really is not equal NULL.
231 // Checking for this case dynamically has some cost, but also some benefit (in GC).
232 // It's hard to say if cost or benefit is greater.
233 { Label OK;
234 __ z_ltgr(val, val);
235 __ z_brne(OK);
236 __ store_heap_oop_null(val, offset, base);
237 __ z_bru(Done);
238 __ bind(OK);
239 }
240 // G1 barrier needs uncompressed oop for region cross check.
241 // Store_heap_oop compresses the oop in the argument register.
242 Register val_work = val;
243 if (UseCompressedOops) {
244 val_work = tmp3;
245 __ z_lgr(val_work, val);
246 }
247 __ store_heap_oop_not_null(val_work, offset, base);
248
249 // We need precise card marks for oop array stores.
250 // Otherwise, cardmarking the object which contains the oop is sufficient.
251 if (precise && !(offset.is_constant() && offset.as_constant() == 0)) {
252 __ add2reg_with_index(base,
253 offset.constant_or_zero(),
254 offset.register_or_noreg(),
255 base);
256 }
257 __ g1_write_barrier_post(base /* store_adr */, val, tmp1, tmp2, tmp3);
258 __ bind(Done);
259 }
260 }
261 break;
262 #endif // INCLUDE_ALL_GCS
263 case BarrierSet::CardTableForRS:
264 case BarrierSet::CardTableExtension:
265 {
266 if (val_is_null) {
267 __ store_heap_oop_null(val, offset, base);
268 } else {
269 __ store_heap_oop(val, offset, base);
270 // Flatten object address if needed.
271 if (precise && ((offset.register_or_noreg() != noreg) || (offset.constant_or_zero() != 0))) {
272 __ load_address(base, Address(base, offset.register_or_noreg(), offset.constant_or_zero()));
273 }
274 __ card_write_barrier_post(base, tmp1);
275 }
276 }
277 break;
278 case BarrierSet::ModRef:
279 // fall through
280 default:
281 ShouldNotReachHere();
282
283 }
284 BLOCK_COMMENT("} do_oop_store");
285 }
286
287 Address TemplateTable::at_bcp(int offset) {
288 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
289 return Address(Z_bcp, offset);
290 }
291
292 void TemplateTable::patch_bytecode(Bytecodes::Code bc,
293 Register bc_reg,
294 Register temp_reg,
295 bool load_bc_into_bc_reg, // = true
296 int byte_no) {
297 if (!RewriteBytecodes) { return; }
298
299 NearLabel L_patch_done;
300 BLOCK_COMMENT("patch_bytecode {");
301
302 switch (bc) {
303 case Bytecodes::_fast_aputfield:
304 case Bytecodes::_fast_bputfield:
305 case Bytecodes::_fast_zputfield:
306 case Bytecodes::_fast_cputfield:
307 case Bytecodes::_fast_dputfield:
308 case Bytecodes::_fast_fputfield:
309 case Bytecodes::_fast_iputfield:
310 case Bytecodes::_fast_lputfield:
311 case Bytecodes::_fast_sputfield:
312 {
313 // We skip bytecode quickening for putfield instructions when
314 // the put_code written to the constant pool cache is zero.
315 // This is required so that every execution of this instruction
316 // calls out to InterpreterRuntime::resolve_get_put to do
317 // additional, required work.
318 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
319 assert(load_bc_into_bc_reg, "we use bc_reg as temp");
320 __ get_cache_and_index_and_bytecode_at_bcp(Z_R1_scratch, bc_reg,
321 temp_reg, byte_no, 1);
322 __ load_const_optimized(bc_reg, bc);
323 __ compareU32_and_branch(temp_reg, (intptr_t)0,
324 Assembler::bcondZero, L_patch_done);
325 }
326 break;
327 default:
328 assert(byte_no == -1, "sanity");
329 // The pair bytecodes have already done the load.
330 if (load_bc_into_bc_reg) {
331 __ load_const_optimized(bc_reg, bc);
332 }
333 break;
334 }
335
336 if (JvmtiExport::can_post_breakpoint()) {
337
338 Label L_fast_patch;
339
340 // If a breakpoint is present we can't rewrite the stream directly.
341 __ z_cli(at_bcp(0), Bytecodes::_breakpoint);
342 __ z_brne(L_fast_patch);
343 __ get_method(temp_reg);
344 // Let breakpoint table handling rewrite to quicker bytecode.
345 __ call_VM_static(noreg,
346 CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at),
347 temp_reg, Z_R13, bc_reg);
348 __ z_bru(L_patch_done);
349
350 __ bind(L_fast_patch);
351 }
352
353 #ifdef ASSERT
354 NearLabel L_okay;
355
356 // We load into 64 bits, since this works on any CPU.
357 __ z_llgc(temp_reg, at_bcp(0));
358 __ compareU32_and_branch(temp_reg, Bytecodes::java_code(bc),
359 Assembler::bcondEqual, L_okay );
360 __ compareU32_and_branch(temp_reg, bc_reg, Assembler::bcondEqual, L_okay);
361 __ stop_static("patching the wrong bytecode");
362 __ bind(L_okay);
363 #endif
364
365 // Patch bytecode.
366 __ z_stc(bc_reg, at_bcp(0));
367
368 __ bind(L_patch_done);
369 BLOCK_COMMENT("} patch_bytecode");
370 }
371
372 // Individual instructions
373
374 void TemplateTable::nop() {
375 transition(vtos, vtos);
376 }
377
378 void TemplateTable::shouldnotreachhere() {
379 transition(vtos, vtos);
380 __ stop("shouldnotreachhere bytecode");
381 }
382
383 void TemplateTable::aconst_null() {
384 transition(vtos, atos);
385 __ clear_reg(Z_tos, true, false);
386 }
387
388 void TemplateTable::iconst(int value) {
389 transition(vtos, itos);
390 // Zero extension of the iconst makes zero extension at runtime obsolete.
391 __ load_const_optimized(Z_tos, ((unsigned long)(unsigned int)value));
392 }
393
394 void TemplateTable::lconst(int value) {
395 transition(vtos, ltos);
396 __ load_const_optimized(Z_tos, value);
397 }
398
399 // No pc-relative load/store for floats.
400 void TemplateTable::fconst(int value) {
401 transition(vtos, ftos);
402 static float one = 1.0f, two = 2.0f;
403
404 switch (value) {
405 case 0:
406 __ z_lzer(Z_ftos);
407 return;
408 case 1:
409 __ load_absolute_address(Z_R1_scratch, (address) &one);
410 __ mem2freg_opt(Z_ftos, Address(Z_R1_scratch), false);
411 return;
412 case 2:
413 __ load_absolute_address(Z_R1_scratch, (address) &two);
414 __ mem2freg_opt(Z_ftos, Address(Z_R1_scratch), false);
415 return;
416 default:
417 ShouldNotReachHere();
418 return;
419 }
420 }
421
422 void TemplateTable::dconst(int value) {
423 transition(vtos, dtos);
424 static double one = 1.0;
425
426 switch (value) {
427 case 0:
428 __ z_lzdr(Z_ftos);
429 return;
430 case 1:
431 __ load_absolute_address(Z_R1_scratch, (address) &one);
432 __ mem2freg_opt(Z_ftos, Address(Z_R1_scratch));
433 return;
434 default:
435 ShouldNotReachHere();
436 return;
437 }
438 }
439
440 void TemplateTable::bipush() {
441 transition(vtos, itos);
442 __ z_lb(Z_tos, at_bcp(1));
443 }
444
445 void TemplateTable::sipush() {
446 transition(vtos, itos);
447 __ get_2_byte_integer_at_bcp(Z_tos, 1, InterpreterMacroAssembler::Signed);
448 }
449
450
451 void TemplateTable::ldc(bool wide) {
452 transition(vtos, vtos);
453 Label call_ldc, notFloat, notClass, Done;
454 const Register RcpIndex = Z_tmp_1;
455 const Register Rtags = Z_ARG2;
456
457 if (wide) {
458 __ get_2_byte_integer_at_bcp(RcpIndex, 1, InterpreterMacroAssembler::Unsigned);
459 } else {
460 __ z_llgc(RcpIndex, at_bcp(1));
461 }
462
463 __ get_cpool_and_tags(Z_tmp_2, Rtags);
464
465 const int base_offset = ConstantPool::header_size() * wordSize;
466 const int tags_offset = Array<u1>::base_offset_in_bytes();
467 const Register Raddr_type = Rtags;
468
469 // Get address of type.
470 __ add2reg_with_index(Raddr_type, tags_offset, RcpIndex, Rtags);
471
472 __ z_cli(0, Raddr_type, JVM_CONSTANT_UnresolvedClass);
473 __ z_bre(call_ldc); // Unresolved class - get the resolved class.
474
475 __ z_cli(0, Raddr_type, JVM_CONSTANT_UnresolvedClassInError);
476 __ z_bre(call_ldc); // Unresolved class in error state - call into runtime
477 // to throw the error from the first resolution attempt.
478
479 __ z_cli(0, Raddr_type, JVM_CONSTANT_Class);
480 __ z_brne(notClass); // Resolved class - need to call vm to get java
481 // mirror of the class.
482
483 // We deal with a class. Call vm to do the appropriate.
484 __ bind(call_ldc);
485 __ load_const_optimized(Z_ARG2, wide);
486 call_VM(Z_RET, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), Z_ARG2);
487 __ push_ptr(Z_RET);
488 __ z_bru(Done);
489
490 // Not a class.
491 __ bind(notClass);
492 Register RcpOffset = RcpIndex;
493 __ z_sllg(RcpOffset, RcpIndex, LogBytesPerWord); // Convert index to offset.
494 __ z_cli(0, Raddr_type, JVM_CONSTANT_Float);
495 __ z_brne(notFloat);
496
497 // ftos
498 __ mem2freg_opt(Z_ftos, Address(Z_tmp_2, RcpOffset, base_offset), false);
499 __ push_f();
500 __ z_bru(Done);
501
502 __ bind(notFloat);
503 #ifdef ASSERT
504 {
505 Label L;
506
507 __ z_cli(0, Raddr_type, JVM_CONSTANT_Integer);
508 __ z_bre(L);
509 // String and Object are rewritten to fast_aldc.
510 __ stop("unexpected tag type in ldc");
511
512 __ bind(L);
513 }
514 #endif
515
516 // itos
517 __ mem2reg_opt(Z_tos, Address(Z_tmp_2, RcpOffset, base_offset), false);
518 __ push_i(Z_tos);
519
520 __ bind(Done);
521 }
522
523 // Fast path for caching oop constants.
524 // %%% We should use this to handle Class and String constants also.
525 // %%% It will simplify the ldc/primitive path considerably.
526 void TemplateTable::fast_aldc(bool wide) {
527 transition(vtos, atos);
528
529 const Register index = Z_tmp_2;
530 int index_size = wide ? sizeof(u2) : sizeof(u1);
531 Label L_resolved;
532
533 // We are resolved if the resolved reference cache entry contains a
534 // non-null object (CallSite, etc.).
535 __ get_cache_index_at_bcp(index, 1, index_size); // Load index.
536 __ load_resolved_reference_at_index(Z_tos, index);
537 __ z_ltgr(Z_tos, Z_tos);
538 __ z_brne(L_resolved);
539
540 // First time invocation - must resolve first.
541 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
542 __ load_const_optimized(Z_ARG1, (int)bytecode());
543 __ call_VM(Z_tos, entry, Z_ARG1);
544
545 __ bind(L_resolved);
546 __ verify_oop(Z_tos);
547 }
548
549 void TemplateTable::ldc2_w() {
550 transition(vtos, vtos);
551 Label Long, Done;
552
553 // Z_tmp_1 = index of cp entry
554 __ get_2_byte_integer_at_bcp(Z_tmp_1, 1, InterpreterMacroAssembler::Unsigned);
555
556 __ get_cpool_and_tags(Z_tmp_2, Z_tos);
557
558 const int base_offset = ConstantPool::header_size() * wordSize;
559 const int tags_offset = Array<u1>::base_offset_in_bytes();
560
561 // Get address of type.
562 __ add2reg_with_index(Z_tos, tags_offset, Z_tos, Z_tmp_1);
563
564 // Index needed in both branches, so calculate here.
565 __ z_sllg(Z_tmp_1, Z_tmp_1, LogBytesPerWord); // index2bytes
566
567 // Check type.
568 __ z_cli(0, Z_tos, JVM_CONSTANT_Double);
569 __ z_brne(Long);
570
571 // dtos
572 __ mem2freg_opt(Z_ftos, Address(Z_tmp_2, Z_tmp_1, base_offset));
573 __ push_d();
574 __ z_bru(Done);
575
576 __ bind(Long);
577 // ltos
578 __ mem2reg_opt(Z_tos, Address(Z_tmp_2, Z_tmp_1, base_offset));
579 __ push_l();
580
581 __ bind(Done);
582 }
583
584 void TemplateTable::locals_index(Register reg, int offset) {
585 __ z_llgc(reg, at_bcp(offset));
586 __ z_lcgr(reg);
587 }
588
589 void TemplateTable::iload() {
590 iload_internal();
591 }
592
593 void TemplateTable::nofast_iload() {
594 iload_internal(may_not_rewrite);
595 }
596
597 void TemplateTable::iload_internal(RewriteControl rc) {
598 transition(vtos, itos);
599
600 if (RewriteFrequentPairs && rc == may_rewrite) {
601 NearLabel rewrite, done;
602 const Register bc = Z_ARG4;
603
604 assert(Z_R1_scratch != bc, "register damaged");
605
606 // Get next byte.
607 __ z_llgc(Z_R1_scratch, at_bcp(Bytecodes::length_for (Bytecodes::_iload)));
608
609 // If _iload, wait to rewrite to iload2. We only want to rewrite the
610 // last two iloads in a pair. Comparing against fast_iload means that
611 // the next bytecode is neither an iload or a caload, and therefore
612 // an iload pair.
613 __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_iload,
614 Assembler::bcondEqual, done);
615
616 __ load_const_optimized(bc, Bytecodes::_fast_iload2);
617 __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_fast_iload,
618 Assembler::bcondEqual, rewrite);
619
620 // If _caload, rewrite to fast_icaload.
621 __ load_const_optimized(bc, Bytecodes::_fast_icaload);
622 __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_caload,
623 Assembler::bcondEqual, rewrite);
624
625 // Rewrite so iload doesn't check again.
626 __ load_const_optimized(bc, Bytecodes::_fast_iload);
627
628 // rewrite
629 // bc: fast bytecode
630 __ bind(rewrite);
631 patch_bytecode(Bytecodes::_iload, bc, Z_R1_scratch, false);
632
633 __ bind(done);
634
635 }
636
637 // Get the local value into tos.
638 locals_index(Z_R1_scratch);
639 __ mem2reg_opt(Z_tos, iaddress(_masm, Z_R1_scratch), false);
640 }
641
642 void TemplateTable::fast_iload2() {
643 transition(vtos, itos);
644
645 locals_index(Z_R1_scratch);
646 __ mem2reg_opt(Z_tos, iaddress(_masm, Z_R1_scratch), false);
647 __ push_i(Z_tos);
648 locals_index(Z_R1_scratch, 3);
649 __ mem2reg_opt(Z_tos, iaddress(_masm, Z_R1_scratch), false);
650 }
651
652 void TemplateTable::fast_iload() {
653 transition(vtos, itos);
654
655 locals_index(Z_R1_scratch);
656 __ mem2reg_opt(Z_tos, iaddress(_masm, Z_R1_scratch), false);
657 }
658
659 void TemplateTable::lload() {
660 transition(vtos, ltos);
661
662 locals_index(Z_R1_scratch);
663 __ mem2reg_opt(Z_tos, laddress(_masm, Z_R1_scratch));
664 }
665
666 void TemplateTable::fload() {
667 transition(vtos, ftos);
668
669 locals_index(Z_R1_scratch);
670 __ mem2freg_opt(Z_ftos, faddress(_masm, Z_R1_scratch), false);
671 }
672
673 void TemplateTable::dload() {
674 transition(vtos, dtos);
675
676 locals_index(Z_R1_scratch);
677 __ mem2freg_opt(Z_ftos, daddress(_masm, Z_R1_scratch));
678 }
679
680 void TemplateTable::aload() {
681 transition(vtos, atos);
682
683 locals_index(Z_R1_scratch);
684 __ mem2reg_opt(Z_tos, aaddress(_masm, Z_R1_scratch));
685 }
686
687 void TemplateTable::locals_index_wide(Register reg) {
688 __ get_2_byte_integer_at_bcp(reg, 2, InterpreterMacroAssembler::Unsigned);
689 __ z_lcgr(reg);
690 }
691
692 void TemplateTable::wide_iload() {
693 transition(vtos, itos);
694
695 locals_index_wide(Z_tmp_1);
696 __ mem2reg_opt(Z_tos, iaddress(_masm, Z_tmp_1), false);
697 }
698
699 void TemplateTable::wide_lload() {
700 transition(vtos, ltos);
701
702 locals_index_wide(Z_tmp_1);
703 __ mem2reg_opt(Z_tos, laddress(_masm, Z_tmp_1));
704 }
705
706 void TemplateTable::wide_fload() {
707 transition(vtos, ftos);
708
709 locals_index_wide(Z_tmp_1);
710 __ mem2freg_opt(Z_ftos, faddress(_masm, Z_tmp_1), false);
711 }
712
713 void TemplateTable::wide_dload() {
714 transition(vtos, dtos);
715
716 locals_index_wide(Z_tmp_1);
717 __ mem2freg_opt(Z_ftos, daddress(_masm, Z_tmp_1));
718 }
719
720 void TemplateTable::wide_aload() {
721 transition(vtos, atos);
722
723 locals_index_wide(Z_tmp_1);
724 __ mem2reg_opt(Z_tos, aaddress(_masm, Z_tmp_1));
725 }
726
727 void TemplateTable::index_check(Register array, Register index, unsigned int shift) {
728 assert_different_registers(Z_R1_scratch, array, index);
729
730 // Check array.
731 __ null_check(array, Z_R0_scratch, arrayOopDesc::length_offset_in_bytes());
732
733 // Sign extend index for use by indexed load.
734 __ z_lgfr(index, index);
735
736 // Check index.
737 Label index_ok;
738 __ z_cl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
739 __ z_brl(index_ok);
740 __ lgr_if_needed(Z_ARG3, index); // See generate_ArrayIndexOutOfBounds_handler().
741 // Give back the array to create more detailed exceptions.
742 __ lgr_if_needed(Z_ARG2, array); // See generate_ArrayIndexOutOfBounds_handler().
743 __ load_absolute_address(Z_R1_scratch,
744 Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
745 __ z_bcr(Assembler::bcondAlways, Z_R1_scratch);
746 __ bind(index_ok);
747
748 if (shift > 0)
749 __ z_sllg(index, index, shift);
750 }
751
752 void TemplateTable::iaload() {
753 transition(itos, itos);
754
755 __ pop_ptr(Z_tmp_1); // array
756 // Index is in Z_tos.
757 Register index = Z_tos;
758 index_check(Z_tmp_1, index, LogBytesPerInt); // Kills Z_ARG3.
759 // Load the value.
760 __ mem2reg_opt(Z_tos,
761 Address(Z_tmp_1, index, arrayOopDesc::base_offset_in_bytes(T_INT)),
762 false);
763 }
764
765 void TemplateTable::laload() {
766 transition(itos, ltos);
767
768 __ pop_ptr(Z_tmp_2);
769 // Z_tos : index
770 // Z_tmp_2 : array
771 Register index = Z_tos;
772 index_check(Z_tmp_2, index, LogBytesPerLong);
773 __ mem2reg_opt(Z_tos,
774 Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_LONG)));
775 }
776
777 void TemplateTable::faload() {
778 transition(itos, ftos);
779
780 __ pop_ptr(Z_tmp_2);
781 // Z_tos : index
782 // Z_tmp_2 : array
783 Register index = Z_tos;
784 index_check(Z_tmp_2, index, LogBytesPerInt);
785 __ mem2freg_opt(Z_ftos,
786 Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
787 false);
788 }
789
790 void TemplateTable::daload() {
791 transition(itos, dtos);
792
793 __ pop_ptr(Z_tmp_2);
794 // Z_tos : index
795 // Z_tmp_2 : array
796 Register index = Z_tos;
797 index_check(Z_tmp_2, index, LogBytesPerLong);
798 __ mem2freg_opt(Z_ftos,
799 Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
800 }
801
802 void TemplateTable::aaload() {
803 transition(itos, atos);
804
805 unsigned const int shift = LogBytesPerHeapOop;
806 __ pop_ptr(Z_tmp_1); // array
807 // Index is in Z_tos.
808 Register index = Z_tos;
809 index_check(Z_tmp_1, index, shift);
810 // Now load array element.
811 __ load_heap_oop(Z_tos,
812 Address(Z_tmp_1, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
813 __ verify_oop(Z_tos);
814 }
815
816 void TemplateTable::baload() {
817 transition(itos, itos);
818
819 __ pop_ptr(Z_tmp_1);
820 // Z_tos : index
821 // Z_tmp_1 : array
822 Register index = Z_tos;
823 index_check(Z_tmp_1, index, 0);
824 __ z_lb(Z_tos,
825 Address(Z_tmp_1, index, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
826 }
827
828 void TemplateTable::caload() {
829 transition(itos, itos);
830
831 __ pop_ptr(Z_tmp_2);
832 // Z_tos : index
833 // Z_tmp_2 : array
834 Register index = Z_tos;
835 index_check(Z_tmp_2, index, LogBytesPerShort);
836 // Load into 64 bits, works on all CPUs.
837 __ z_llgh(Z_tos,
838 Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
839 }
840
841 // Iload followed by caload frequent pair.
842 void TemplateTable::fast_icaload() {
843 transition(vtos, itos);
844
845 // Load index out of locals.
846 locals_index(Z_R1_scratch);
847 __ mem2reg_opt(Z_ARG3, iaddress(_masm, Z_R1_scratch), false);
848 // Z_ARG3 : index
849 // Z_tmp_2 : array
850 __ pop_ptr(Z_tmp_2);
851 index_check(Z_tmp_2, Z_ARG3, LogBytesPerShort);
852 // Load into 64 bits, works on all CPUs.
853 __ z_llgh(Z_tos,
854 Address(Z_tmp_2, Z_ARG3, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
855 }
856
857 void TemplateTable::saload() {
858 transition(itos, itos);
859
860 __ pop_ptr(Z_tmp_2);
861 // Z_tos : index
862 // Z_tmp_2 : array
863 Register index = Z_tos;
864 index_check(Z_tmp_2, index, LogBytesPerShort);
865 __ z_lh(Z_tos,
866 Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
867 }
868
869 void TemplateTable::iload(int n) {
870 transition(vtos, itos);
871 __ z_ly(Z_tos, iaddress(n));
872 }
873
874 void TemplateTable::lload(int n) {
875 transition(vtos, ltos);
876 __ z_lg(Z_tos, laddress(n));
877 }
878
879 void TemplateTable::fload(int n) {
880 transition(vtos, ftos);
881 __ mem2freg_opt(Z_ftos, faddress(n), false);
882 }
883
884 void TemplateTable::dload(int n) {
885 transition(vtos, dtos);
886 __ mem2freg_opt(Z_ftos, daddress(n));
887 }
888
889 void TemplateTable::aload(int n) {
890 transition(vtos, atos);
891 __ mem2reg_opt(Z_tos, aaddress(n));
892 }
893
894 void TemplateTable::aload_0() {
895 aload_0_internal();
896 }
897
898 void TemplateTable::nofast_aload_0() {
899 aload_0_internal(may_not_rewrite);
900 }
901
902 void TemplateTable::aload_0_internal(RewriteControl rc) {
903 transition(vtos, atos);
904
905 // According to bytecode histograms, the pairs:
906 //
907 // _aload_0, _fast_igetfield
908 // _aload_0, _fast_agetfield
909 // _aload_0, _fast_fgetfield
910 //
911 // occur frequently. If RewriteFrequentPairs is set, the (slow)
912 // _aload_0 bytecode checks if the next bytecode is either
913 // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
914 // rewrites the current bytecode into a pair bytecode; otherwise it
915 // rewrites the current bytecode into _fast_aload_0 that doesn't do
916 // the pair check anymore.
917 //
918 // Note: If the next bytecode is _getfield, the rewrite must be
919 // delayed, otherwise we may miss an opportunity for a pair.
920 //
921 // Also rewrite frequent pairs
922 // aload_0, aload_1
923 // aload_0, iload_1
924 // These bytecodes with a small amount of code are most profitable
925 // to rewrite.
926 if (!(RewriteFrequentPairs && (rc == may_rewrite))) {
927 aload(0);
928 return;
929 }
930
931 NearLabel rewrite, done;
932 const Register bc = Z_ARG4;
933
934 assert(Z_R1_scratch != bc, "register damaged");
935 // Get next byte.
936 __ z_llgc(Z_R1_scratch, at_bcp(Bytecodes::length_for (Bytecodes::_aload_0)));
937
938 // Do actual aload_0.
939 aload(0);
940
941 // If _getfield then wait with rewrite.
942 __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_getfield,
943 Assembler::bcondEqual, done);
944
945 // If _igetfield then rewrite to _fast_iaccess_0.
946 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0)
947 == Bytecodes::_aload_0, "fix bytecode definition");
948
949 __ load_const_optimized(bc, Bytecodes::_fast_iaccess_0);
950 __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_fast_igetfield,
951 Assembler::bcondEqual, rewrite);
952
953 // If _agetfield then rewrite to _fast_aaccess_0.
954 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0)
955 == Bytecodes::_aload_0, "fix bytecode definition");
956
957 __ load_const_optimized(bc, Bytecodes::_fast_aaccess_0);
958 __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_fast_agetfield,
959 Assembler::bcondEqual, rewrite);
960
961 // If _fgetfield then rewrite to _fast_faccess_0.
962 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0)
963 == Bytecodes::_aload_0, "fix bytecode definition");
964
965 __ load_const_optimized(bc, Bytecodes::_fast_faccess_0);
966 __ compareU32_and_branch(Z_R1_scratch, Bytecodes::_fast_fgetfield,
967 Assembler::bcondEqual, rewrite);
968
969 // Else rewrite to _fast_aload0.
970 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0)
971 == Bytecodes::_aload_0, "fix bytecode definition");
972 __ load_const_optimized(bc, Bytecodes::_fast_aload_0);
973
974 // rewrite
975 // bc: fast bytecode
976 __ bind(rewrite);
977
978 patch_bytecode(Bytecodes::_aload_0, bc, Z_R1_scratch, false);
979 // Reload local 0 because of VM call inside patch_bytecode().
980 // this may trigger GC and thus change the oop.
981 aload(0);
982
983 __ bind(done);
984 }
985
986 void TemplateTable::istore() {
987 transition(itos, vtos);
988 locals_index(Z_R1_scratch);
989 __ reg2mem_opt(Z_tos, iaddress(_masm, Z_R1_scratch), false);
990 }
991
992 void TemplateTable::lstore() {
993 transition(ltos, vtos);
994 locals_index(Z_R1_scratch);
995 __ reg2mem_opt(Z_tos, laddress(_masm, Z_R1_scratch));
996 }
997
998 void TemplateTable::fstore() {
999 transition(ftos, vtos);
1000 locals_index(Z_R1_scratch);
1001 __ freg2mem_opt(Z_ftos, faddress(_masm, Z_R1_scratch));
1002 }
1003
1004 void TemplateTable::dstore() {
1005 transition(dtos, vtos);
1006 locals_index(Z_R1_scratch);
1007 __ freg2mem_opt(Z_ftos, daddress(_masm, Z_R1_scratch));
1008 }
1009
1010 void TemplateTable::astore() {
1011 transition(vtos, vtos);
1012 __ pop_ptr(Z_tos);
1013 locals_index(Z_R1_scratch);
1014 __ reg2mem_opt(Z_tos, aaddress(_masm, Z_R1_scratch));
1015 }
1016
1017 void TemplateTable::wide_istore() {
1018 transition(vtos, vtos);
1019 __ pop_i(Z_tos);
1020 locals_index_wide(Z_tmp_1);
1021 __ reg2mem_opt(Z_tos, iaddress(_masm, Z_tmp_1), false);
1022 }
1023
1024 void TemplateTable::wide_lstore() {
1025 transition(vtos, vtos);
1026 __ pop_l(Z_tos);
1027 locals_index_wide(Z_tmp_1);
1028 __ reg2mem_opt(Z_tos, laddress(_masm, Z_tmp_1));
1029 }
1030
1031 void TemplateTable::wide_fstore() {
1032 transition(vtos, vtos);
1033 __ pop_f(Z_ftos);
1034 locals_index_wide(Z_tmp_1);
1035 __ freg2mem_opt(Z_ftos, faddress(_masm, Z_tmp_1), false);
1036 }
1037
1038 void TemplateTable::wide_dstore() {
1039 transition(vtos, vtos);
1040 __ pop_d(Z_ftos);
1041 locals_index_wide(Z_tmp_1);
1042 __ freg2mem_opt(Z_ftos, daddress(_masm, Z_tmp_1));
1043 }
1044
1045 void TemplateTable::wide_astore() {
1046 transition(vtos, vtos);
1047 __ pop_ptr(Z_tos);
1048 locals_index_wide(Z_tmp_1);
1049 __ reg2mem_opt(Z_tos, aaddress(_masm, Z_tmp_1));
1050 }
1051
1052 void TemplateTable::iastore() {
1053 transition(itos, vtos);
1054
1055 Register index = Z_ARG3; // Index_check expects index in Z_ARG3.
1056 // Value is in Z_tos ...
1057 __ pop_i(index); // index
1058 __ pop_ptr(Z_tmp_1); // array
1059 index_check(Z_tmp_1, index, LogBytesPerInt);
1060 // ... and then move the value.
1061 __ reg2mem_opt(Z_tos,
1062 Address(Z_tmp_1, index, arrayOopDesc::base_offset_in_bytes(T_INT)),
1063 false);
1064 }
1065
1066 void TemplateTable::lastore() {
1067 transition(ltos, vtos);
1068
1069 __ pop_i(Z_ARG3);
1070 __ pop_ptr(Z_tmp_2);
1071 // Z_tos : value
1072 // Z_ARG3 : index
1073 // Z_tmp_2 : array
1074 index_check(Z_tmp_2, Z_ARG3, LogBytesPerLong); // Prefer index in Z_ARG3.
1075 __ reg2mem_opt(Z_tos,
1076 Address(Z_tmp_2, Z_ARG3, arrayOopDesc::base_offset_in_bytes(T_LONG)));
1077 }
1078
1079 void TemplateTable::fastore() {
1080 transition(ftos, vtos);
1081
1082 __ pop_i(Z_ARG3);
1083 __ pop_ptr(Z_tmp_2);
1084 // Z_ftos : value
1085 // Z_ARG3 : index
1086 // Z_tmp_2 : array
1087 index_check(Z_tmp_2, Z_ARG3, LogBytesPerInt); // Prefer index in Z_ARG3.
1088 __ freg2mem_opt(Z_ftos,
1089 Address(Z_tmp_2, Z_ARG3, arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
1090 false);
1091 }
1092
1093 void TemplateTable::dastore() {
1094 transition(dtos, vtos);
1095
1096 __ pop_i(Z_ARG3);
1097 __ pop_ptr(Z_tmp_2);
1098 // Z_ftos : value
1099 // Z_ARG3 : index
1100 // Z_tmp_2 : array
1101 index_check(Z_tmp_2, Z_ARG3, LogBytesPerLong); // Prefer index in Z_ARG3.
1102 __ freg2mem_opt(Z_ftos,
1103 Address(Z_tmp_2, Z_ARG3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
1104 }
1105
1106 void TemplateTable::aastore() {
1107 NearLabel is_null, ok_is_subtype, done;
1108 transition(vtos, vtos);
1109
1110 // stack: ..., array, index, value
1111
1112 Register Rvalue = Z_tos;
1113 Register Rarray = Z_ARG2;
1114 Register Rindex = Z_ARG3; // Convention for index_check().
1115
1116 __ load_ptr(0, Rvalue);
1117 __ z_l(Rindex, Address(Z_esp, Interpreter::expr_offset_in_bytes(1)));
1118 __ load_ptr(2, Rarray);
1119
1120 unsigned const int shift = LogBytesPerHeapOop;
1121 index_check(Rarray, Rindex, shift); // side effect: Rindex = Rindex << shift
1122 Register Rstore_addr = Rindex;
1123 // Address where the store goes to, i.e. &(Rarry[index])
1124 __ load_address(Rstore_addr, Address(Rarray, Rindex, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
1125
1126 // do array store check - check for NULL value first.
1127 __ compareU64_and_branch(Rvalue, (intptr_t)0, Assembler::bcondEqual, is_null);
1128
1129 Register Rsub_klass = Z_ARG4;
1130 Register Rsuper_klass = Z_ARG5;
1131 __ load_klass(Rsub_klass, Rvalue);
1132 // Load superklass.
1133 __ load_klass(Rsuper_klass, Rarray);
1134 __ z_lg(Rsuper_klass, Address(Rsuper_klass, ObjArrayKlass::element_klass_offset()));
1135
1136 // Generate a fast subtype check. Branch to ok_is_subtype if no failure.
1137 // Throw if failure.
1138 Register tmp1 = Z_tmp_1;
1139 Register tmp2 = Z_tmp_2;
1140 __ gen_subtype_check(Rsub_klass, Rsuper_klass, tmp1, tmp2, ok_is_subtype);
1141
1142 // Fall through on failure.
1143 // Object is in Rvalue == Z_tos.
1144 assert(Rvalue == Z_tos, "that's the expected location");
1145 __ load_absolute_address(tmp1, Interpreter::_throw_ArrayStoreException_entry);
1146 __ z_br(tmp1);
1147
1148 // Come here on success.
1149 __ bind(ok_is_subtype);
1150
1151 // Now store using the appropriate barrier.
1152 Register tmp3 = Rsub_klass;
1153 do_oop_store(_masm, Rstore_addr, (intptr_t)0/*offset*/, Rvalue, false/*val==null*/,
1154 tmp3, tmp2, tmp1, _bs->kind(), true);
1155 __ z_bru(done);
1156
1157 // Have a NULL in Rvalue.
1158 __ bind(is_null);
1159 __ profile_null_seen(tmp1);
1160
1161 // Store a NULL.
1162 do_oop_store(_masm, Rstore_addr, (intptr_t)0/*offset*/, Rvalue, true/*val==null*/,
1163 tmp3, tmp2, tmp1, _bs->kind(), true);
1164
1165 // Pop stack arguments.
1166 __ bind(done);
1167 __ add2reg(Z_esp, 3 * Interpreter::stackElementSize);
1168 }
1169
1170
1171 void TemplateTable::bastore() {
1172 transition(itos, vtos);
1173
1174 __ pop_i(Z_ARG3);
1175 __ pop_ptr(Z_tmp_2);
1176 // Z_tos : value
1177 // Z_ARG3 : index
1178 // Z_tmp_2 : array
1179 // No index shift necessary - pass 0.
1180 index_check(Z_tmp_2, Z_ARG3, 0); // Prefer index in Z_ARG3.
1181 __ z_stc(Z_tos,
1182 Address(Z_tmp_2, Z_ARG3, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
1183 }
1184
1185 void TemplateTable::castore() {
1186 transition(itos, vtos);
1187
1188 __ pop_i(Z_ARG3);
1189 __ pop_ptr(Z_tmp_2);
1190 // Z_tos : value
1191 // Z_ARG3 : index
1192 // Z_tmp_2 : array
1193 Register index = Z_ARG3; // prefer index in Z_ARG3
1194 index_check(Z_tmp_2, index, LogBytesPerShort);
1195 __ z_sth(Z_tos,
1196 Address(Z_tmp_2, index, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
1197 }
1198
1199 void TemplateTable::sastore() {
1200 castore();
1201 }
1202
1203 void TemplateTable::istore(int n) {
1204 transition(itos, vtos);
1205 __ reg2mem_opt(Z_tos, iaddress(n), false);
1206 }
1207
1208 void TemplateTable::lstore(int n) {
1209 transition(ltos, vtos);
1210 __ reg2mem_opt(Z_tos, laddress(n));
1211 }
1212
1213 void TemplateTable::fstore(int n) {
1214 transition(ftos, vtos);
1215 __ freg2mem_opt(Z_ftos, faddress(n), false);
1216 }
1217
1218 void TemplateTable::dstore(int n) {
1219 transition(dtos, vtos);
1220 __ freg2mem_opt(Z_ftos, daddress(n));
1221 }
1222
1223 void TemplateTable::astore(int n) {
1224 transition(vtos, vtos);
1225 __ pop_ptr(Z_tos);
1226 __ reg2mem_opt(Z_tos, aaddress(n));
1227 }
1228
1229 void TemplateTable::pop() {
1230 transition(vtos, vtos);
1231 __ add2reg(Z_esp, Interpreter::stackElementSize);
1232 }
1233
1234 void TemplateTable::pop2() {
1235 transition(vtos, vtos);
1236 __ add2reg(Z_esp, 2 * Interpreter::stackElementSize);
1237 }
1238
1239 void TemplateTable::dup() {
1240 transition(vtos, vtos);
1241 __ load_ptr(0, Z_tos);
1242 __ push_ptr(Z_tos);
1243 // stack: ..., a, a
1244 }
1245
1246 void TemplateTable::dup_x1() {
1247 transition(vtos, vtos);
1248
1249 // stack: ..., a, b
1250 __ load_ptr(0, Z_tos); // load b
1251 __ load_ptr(1, Z_R0_scratch); // load a
1252 __ store_ptr(1, Z_tos); // store b
1253 __ store_ptr(0, Z_R0_scratch); // store a
1254 __ push_ptr(Z_tos); // push b
1255 // stack: ..., b, a, b
1256 }
1257
1258 void TemplateTable::dup_x2() {
1259 transition(vtos, vtos);
1260
1261 // stack: ..., a, b, c
1262 __ load_ptr(0, Z_R0_scratch); // load c
1263 __ load_ptr(2, Z_R1_scratch); // load a
1264 __ store_ptr(2, Z_R0_scratch); // store c in a
1265 __ push_ptr(Z_R0_scratch); // push c
1266 // stack: ..., c, b, c, c
1267 __ load_ptr(2, Z_R0_scratch); // load b
1268 __ store_ptr(2, Z_R1_scratch); // store a in b
1269 // stack: ..., c, a, c, c
1270 __ store_ptr(1, Z_R0_scratch); // store b in c
1271 // stack: ..., c, a, b, c
1272 }
1273
1274 void TemplateTable::dup2() {
1275 transition(vtos, vtos);
1276
1277 // stack: ..., a, b
1278 __ load_ptr(1, Z_R0_scratch); // load a
1279 __ push_ptr(Z_R0_scratch); // push a
1280 __ load_ptr(1, Z_R0_scratch); // load b
1281 __ push_ptr(Z_R0_scratch); // push b
1282 // stack: ..., a, b, a, b
1283 }
1284
1285 void TemplateTable::dup2_x1() {
1286 transition(vtos, vtos);
1287
1288 // stack: ..., a, b, c
1289 __ load_ptr(0, Z_R0_scratch); // load c
1290 __ load_ptr(1, Z_R1_scratch); // load b
1291 __ push_ptr(Z_R1_scratch); // push b
1292 __ push_ptr(Z_R0_scratch); // push c
1293 // stack: ..., a, b, c, b, c
1294 __ store_ptr(3, Z_R0_scratch); // store c in b
1295 // stack: ..., a, c, c, b, c
1296 __ load_ptr( 4, Z_R0_scratch); // load a
1297 __ store_ptr(2, Z_R0_scratch); // store a in 2nd c
1298 // stack: ..., a, c, a, b, c
1299 __ store_ptr(4, Z_R1_scratch); // store b in a
1300 // stack: ..., b, c, a, b, c
1301 }
1302
1303 void TemplateTable::dup2_x2() {
1304 transition(vtos, vtos);
1305
1306 // stack: ..., a, b, c, d
1307 __ load_ptr(0, Z_R0_scratch); // load d
1308 __ load_ptr(1, Z_R1_scratch); // load c
1309 __ push_ptr(Z_R1_scratch); // push c
1310 __ push_ptr(Z_R0_scratch); // push d
1311 // stack: ..., a, b, c, d, c, d
1312 __ load_ptr(4, Z_R1_scratch); // load b
1313 __ store_ptr(2, Z_R1_scratch); // store b in d
1314 __ store_ptr(4, Z_R0_scratch); // store d in b
1315 // stack: ..., a, d, c, b, c, d
1316 __ load_ptr(5, Z_R0_scratch); // load a
1317 __ load_ptr(3, Z_R1_scratch); // load c
1318 __ store_ptr(3, Z_R0_scratch); // store a in c
1319 __ store_ptr(5, Z_R1_scratch); // store c in a
1320 // stack: ..., c, d, a, b, c, d
1321 }
1322
1323 void TemplateTable::swap() {
1324 transition(vtos, vtos);
1325
1326 // stack: ..., a, b
1327 __ load_ptr(1, Z_R0_scratch); // load a
1328 __ load_ptr(0, Z_R1_scratch); // load b
1329 __ store_ptr(0, Z_R0_scratch); // store a in b
1330 __ store_ptr(1, Z_R1_scratch); // store b in a
1331 // stack: ..., b, a
1332 }
1333
1334 void TemplateTable::iop2(Operation op) {
1335 transition(itos, itos);
1336 switch (op) {
1337 case add : __ z_ay(Z_tos, __ stackTop()); __ pop_i(); break;
1338 case sub : __ z_sy(Z_tos, __ stackTop()); __ pop_i(); __ z_lcr(Z_tos, Z_tos); break;
1339 case mul : __ z_msy(Z_tos, __ stackTop()); __ pop_i(); break;
1340 case _and : __ z_ny(Z_tos, __ stackTop()); __ pop_i(); break;
1341 case _or : __ z_oy(Z_tos, __ stackTop()); __ pop_i(); break;
1342 case _xor : __ z_xy(Z_tos, __ stackTop()); __ pop_i(); break;
1343 case shl : __ z_lr(Z_tmp_1, Z_tos);
1344 __ z_nill(Z_tmp_1, 31); // Lowest 5 bits are shiftamount.
1345 __ pop_i(Z_tos); __ z_sll(Z_tos, 0, Z_tmp_1); break;
1346 case shr : __ z_lr(Z_tmp_1, Z_tos);
1347 __ z_nill(Z_tmp_1, 31); // Lowest 5 bits are shiftamount.
1348 __ pop_i(Z_tos); __ z_sra(Z_tos, 0, Z_tmp_1); break;
1349 case ushr : __ z_lr(Z_tmp_1, Z_tos);
1350 __ z_nill(Z_tmp_1, 31); // Lowest 5 bits are shiftamount.
1351 __ pop_i(Z_tos); __ z_srl(Z_tos, 0, Z_tmp_1); break;
1352 default : ShouldNotReachHere(); break;
1353 }
1354 return;
1355 }
1356
1357 void TemplateTable::lop2(Operation op) {
1358 transition(ltos, ltos);
1359
1360 switch (op) {
1361 case add : __ z_ag(Z_tos, __ stackTop()); __ pop_l(); break;
1362 case sub : __ z_sg(Z_tos, __ stackTop()); __ pop_l(); __ z_lcgr(Z_tos, Z_tos); break;
1363 case mul : __ z_msg(Z_tos, __ stackTop()); __ pop_l(); break;
1364 case _and : __ z_ng(Z_tos, __ stackTop()); __ pop_l(); break;
1365 case _or : __ z_og(Z_tos, __ stackTop()); __ pop_l(); break;
1366 case _xor : __ z_xg(Z_tos, __ stackTop()); __ pop_l(); break;
1367 default : ShouldNotReachHere(); break;
1368 }
1369 return;
1370 }
1371
1372 // Common part of idiv/irem.
1373 static void idiv_helper(InterpreterMacroAssembler * _masm, address exception) {
1374 NearLabel not_null;
1375
1376 // Use register pair Z_tmp_1, Z_tmp_2 for DIVIDE SINGLE.
1377 assert(Z_tmp_1->successor() == Z_tmp_2, " need even/odd register pair for idiv/irem");
1378
1379 // Get dividend.
1380 __ pop_i(Z_tmp_2);
1381
1382 // If divisor == 0 throw exception.
1383 __ compare32_and_branch(Z_tos, (intptr_t) 0,
1384 Assembler::bcondNotEqual, not_null );
1385 __ load_absolute_address(Z_R1_scratch, exception);
1386 __ z_br(Z_R1_scratch);
1387
1388 __ bind(not_null);
1389
1390 __ z_lgfr(Z_tmp_2, Z_tmp_2); // Sign extend dividend.
1391 __ z_dsgfr(Z_tmp_1, Z_tos); // Do it.
1392 }
1393
1394 void TemplateTable::idiv() {
1395 transition(itos, itos);
1396
1397 idiv_helper(_masm, Interpreter::_throw_ArithmeticException_entry);
1398 __ z_llgfr(Z_tos, Z_tmp_2); // Result is in Z_tmp_2.
1399 }
1400
1401 void TemplateTable::irem() {
1402 transition(itos, itos);
1403
1404 idiv_helper(_masm, Interpreter::_throw_ArithmeticException_entry);
1405 __ z_llgfr(Z_tos, Z_tmp_1); // Result is in Z_tmp_1.
1406 }
1407
1408 void TemplateTable::lmul() {
1409 transition(ltos, ltos);
1410
1411 // Multiply with memory operand.
1412 __ z_msg(Z_tos, __ stackTop());
1413 __ pop_l(); // Pop operand.
1414 }
1415
1416 // Common part of ldiv/lrem.
1417 //
1418 // Input:
1419 // Z_tos := the divisor (dividend still on stack)
1420 //
1421 // Updated registers:
1422 // Z_tmp_1 := pop_l() % Z_tos ; if is_ldiv == false
1423 // Z_tmp_2 := pop_l() / Z_tos ; if is_ldiv == true
1424 //
1425 static void ldiv_helper(InterpreterMacroAssembler * _masm, address exception, bool is_ldiv) {
1426 NearLabel not_null, done;
1427
1428 // Use register pair Z_tmp_1, Z_tmp_2 for DIVIDE SINGLE.
1429 assert(Z_tmp_1->successor() == Z_tmp_2,
1430 " need even/odd register pair for idiv/irem");
1431
1432 // Get dividend.
1433 __ pop_l(Z_tmp_2);
1434
1435 // If divisor == 0 throw exception.
1436 __ compare64_and_branch(Z_tos, (intptr_t)0, Assembler::bcondNotEqual, not_null);
1437 __ load_absolute_address(Z_R1_scratch, exception);
1438 __ z_br(Z_R1_scratch);
1439
1440 __ bind(not_null);
1441 // Special case for dividend == 0x8000 and divisor == -1.
1442 if (is_ldiv) {
1443 // result := Z_tmp_2 := - dividend
1444 __ z_lcgr(Z_tmp_2, Z_tmp_2);
1445 } else {
1446 // result remainder := Z_tmp_1 := 0
1447 __ clear_reg(Z_tmp_1, true, false); // Don't set CC.
1448 }
1449
1450 // if divisor == -1 goto done
1451 __ compare64_and_branch(Z_tos, -1, Assembler::bcondEqual, done);
1452 if (is_ldiv)
1453 // Restore sign, because divisor != -1.
1454 __ z_lcgr(Z_tmp_2, Z_tmp_2);
1455 __ z_dsgr(Z_tmp_1, Z_tos); // Do it.
1456 __ bind(done);
1457 }
1458
1459 void TemplateTable::ldiv() {
1460 transition(ltos, ltos);
1461
1462 ldiv_helper(_masm, Interpreter::_throw_ArithmeticException_entry, true /*is_ldiv*/);
1463 __ z_lgr(Z_tos, Z_tmp_2); // Result is in Z_tmp_2.
1464 }
1465
1466 void TemplateTable::lrem() {
1467 transition(ltos, ltos);
1468
1469 ldiv_helper(_masm, Interpreter::_throw_ArithmeticException_entry, false /*is_ldiv*/);
1470 __ z_lgr(Z_tos, Z_tmp_1); // Result is in Z_tmp_1.
1471 }
1472
1473 void TemplateTable::lshl() {
1474 transition(itos, ltos);
1475
1476 // Z_tos: shift amount
1477 __ pop_l(Z_tmp_1); // Get shift value.
1478 __ z_sllg(Z_tos, Z_tmp_1, 0, Z_tos);
1479 }
1480
1481 void TemplateTable::lshr() {
1482 transition(itos, ltos);
1483
1484 // Z_tos: shift amount
1485 __ pop_l(Z_tmp_1); // Get shift value.
1486 __ z_srag(Z_tos, Z_tmp_1, 0, Z_tos);
1487 }
1488
1489 void TemplateTable::lushr() {
1490 transition(itos, ltos);
1491
1492 // Z_tos: shift amount
1493 __ pop_l(Z_tmp_1); // Get shift value.
1494 __ z_srlg(Z_tos, Z_tmp_1, 0, Z_tos);
1495 }
1496
1497 void TemplateTable::fop2(Operation op) {
1498 transition(ftos, ftos);
1499
1500 switch (op) {
1501 case add:
1502 // Add memory operand.
1503 __ z_aeb(Z_ftos, __ stackTop()); __ pop_f(); return;
1504 case sub:
1505 // Sub memory operand.
1506 __ z_ler(Z_F1, Z_ftos); // first operand
1507 __ pop_f(Z_ftos); // second operand from stack
1508 __ z_sebr(Z_ftos, Z_F1);
1509 return;
1510 case mul:
1511 // Multiply with memory operand.
1512 __ z_meeb(Z_ftos, __ stackTop()); __ pop_f(); return;
1513 case div:
1514 __ z_ler(Z_F1, Z_ftos); // first operand
1515 __ pop_f(Z_ftos); // second operand from stack
1516 __ z_debr(Z_ftos, Z_F1);
1517 return;
1518 case rem:
1519 // Do runtime call.
1520 __ z_ler(Z_FARG2, Z_ftos); // divisor
1521 __ pop_f(Z_FARG1); // dividend
1522 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1523 // Result should be in the right place (Z_ftos == Z_FRET).
1524 return;
1525 default:
1526 ShouldNotReachHere();
1527 return;
1528 }
1529 }
1530
1531 void TemplateTable::dop2(Operation op) {
1532 transition(dtos, dtos);
1533
1534 switch (op) {
1535 case add:
1536 // Add memory operand.
1537 __ z_adb(Z_ftos, __ stackTop()); __ pop_d(); return;
1538 case sub:
1539 // Sub memory operand.
1540 __ z_ldr(Z_F1, Z_ftos); // first operand
1541 __ pop_d(Z_ftos); // second operand from stack
1542 __ z_sdbr(Z_ftos, Z_F1);
1543 return;
1544 case mul:
1545 // Multiply with memory operand.
1546 __ z_mdb(Z_ftos, __ stackTop()); __ pop_d(); return;
1547 case div:
1548 __ z_ldr(Z_F1, Z_ftos); // first operand
1549 __ pop_d(Z_ftos); // second operand from stack
1550 __ z_ddbr(Z_ftos, Z_F1);
1551 return;
1552 case rem:
1553 // Do runtime call.
1554 __ z_ldr(Z_FARG2, Z_ftos); // divisor
1555 __ pop_d(Z_FARG1); // dividend
1556 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1557 // Result should be in the right place (Z_ftos == Z_FRET).
1558 return;
1559 default:
1560 ShouldNotReachHere();
1561 return;
1562 }
1563 }
1564
1565 void TemplateTable::ineg() {
1566 transition(itos, itos);
1567 __ z_lcr(Z_tos);
1568 }
1569
1570 void TemplateTable::lneg() {
1571 transition(ltos, ltos);
1572 __ z_lcgr(Z_tos);
1573 }
1574
1575 void TemplateTable::fneg() {
1576 transition(ftos, ftos);
1577 __ z_lcebr(Z_ftos, Z_ftos);
1578 }
1579
1580 void TemplateTable::dneg() {
1581 transition(dtos, dtos);
1582 __ z_lcdbr(Z_ftos, Z_ftos);
1583 }
1584
1585 void TemplateTable::iinc() {
1586 transition(vtos, vtos);
1587
1588 Address local;
1589 __ z_lb(Z_R0_scratch, at_bcp(2)); // Get constant.
1590 locals_index(Z_R1_scratch);
1591 local = iaddress(_masm, Z_R1_scratch);
1592 __ z_a(Z_R0_scratch, local);
1593 __ reg2mem_opt(Z_R0_scratch, local, false);
1594 }
1595
1596 void TemplateTable::wide_iinc() {
1597 transition(vtos, vtos);
1598
1599 // Z_tmp_1 := increment
1600 __ get_2_byte_integer_at_bcp(Z_tmp_1, 4, InterpreterMacroAssembler::Signed);
1601 // Z_R1_scratch := index of local to increment
1602 locals_index_wide(Z_tmp_2);
1603 // Load, increment, and store.
1604 __ access_local_int(Z_tmp_2, Z_tos);
1605 __ z_agr(Z_tos, Z_tmp_1);
1606 // Shifted index is still in Z_tmp_2.
1607 __ reg2mem_opt(Z_tos, Address(Z_locals, Z_tmp_2), false);
1608 }
1609
1610
1611 void TemplateTable::convert() {
1612 // Checking
1613 #ifdef ASSERT
1614 TosState tos_in = ilgl;
1615 TosState tos_out = ilgl;
1616
1617 switch (bytecode()) {
1618 case Bytecodes::_i2l:
1619 case Bytecodes::_i2f:
1620 case Bytecodes::_i2d:
1621 case Bytecodes::_i2b:
1622 case Bytecodes::_i2c:
1623 case Bytecodes::_i2s:
1624 tos_in = itos;
1625 break;
1626 case Bytecodes::_l2i:
1627 case Bytecodes::_l2f:
1628 case Bytecodes::_l2d:
1629 tos_in = ltos;
1630 break;
1631 case Bytecodes::_f2i:
1632 case Bytecodes::_f2l:
1633 case Bytecodes::_f2d:
1634 tos_in = ftos;
1635 break;
1636 case Bytecodes::_d2i:
1637 case Bytecodes::_d2l:
1638 case Bytecodes::_d2f:
1639 tos_in = dtos;
1640 break;
1641 default :
1642 ShouldNotReachHere();
1643 }
1644 switch (bytecode()) {
1645 case Bytecodes::_l2i:
1646 case Bytecodes::_f2i:
1647 case Bytecodes::_d2i:
1648 case Bytecodes::_i2b:
1649 case Bytecodes::_i2c:
1650 case Bytecodes::_i2s:
1651 tos_out = itos;
1652 break;
1653 case Bytecodes::_i2l:
1654 case Bytecodes::_f2l:
1655 case Bytecodes::_d2l:
1656 tos_out = ltos;
1657 break;
1658 case Bytecodes::_i2f:
1659 case Bytecodes::_l2f:
1660 case Bytecodes::_d2f:
1661 tos_out = ftos;
1662 break;
1663 case Bytecodes::_i2d:
1664 case Bytecodes::_l2d:
1665 case Bytecodes::_f2d:
1666 tos_out = dtos;
1667 break;
1668 default :
1669 ShouldNotReachHere();
1670 }
1671
1672 transition(tos_in, tos_out);
1673 #endif // ASSERT
1674
1675 // Conversion
1676 Label done;
1677 switch (bytecode()) {
1678 case Bytecodes::_i2l:
1679 __ z_lgfr(Z_tos, Z_tos);
1680 return;
1681 case Bytecodes::_i2f:
1682 __ z_cefbr(Z_ftos, Z_tos);
1683 return;
1684 case Bytecodes::_i2d:
1685 __ z_cdfbr(Z_ftos, Z_tos);
1686 return;
1687 case Bytecodes::_i2b:
1688 // Sign extend least significant byte.
1689 __ move_reg_if_needed(Z_tos, T_BYTE, Z_tos, T_INT);
1690 return;
1691 case Bytecodes::_i2c:
1692 // Zero extend 2 least significant bytes.
1693 __ move_reg_if_needed(Z_tos, T_CHAR, Z_tos, T_INT);
1694 return;
1695 case Bytecodes::_i2s:
1696 // Sign extend 2 least significant bytes.
1697 __ move_reg_if_needed(Z_tos, T_SHORT, Z_tos, T_INT);
1698 return;
1699 case Bytecodes::_l2i:
1700 // Sign-extend not needed here, upper 4 bytes of int value in register are ignored.
1701 return;
1702 case Bytecodes::_l2f:
1703 __ z_cegbr(Z_ftos, Z_tos);
1704 return;
1705 case Bytecodes::_l2d:
1706 __ z_cdgbr(Z_ftos, Z_tos);
1707 return;
1708 case Bytecodes::_f2i:
1709 case Bytecodes::_f2l:
1710 __ clear_reg(Z_tos, true, false); // Don't set CC.
1711 __ z_cebr(Z_ftos, Z_ftos);
1712 __ z_brno(done); // NaN -> 0
1713 if (bytecode() == Bytecodes::_f2i)
1714 __ z_cfebr(Z_tos, Z_ftos, Assembler::to_zero);
1715 else // bytecode() == Bytecodes::_f2l
1716 __ z_cgebr(Z_tos, Z_ftos, Assembler::to_zero);
1717 break;
1718 case Bytecodes::_f2d:
1719 __ move_freg_if_needed(Z_ftos, T_DOUBLE, Z_ftos, T_FLOAT);
1720 return;
1721 case Bytecodes::_d2i:
1722 case Bytecodes::_d2l:
1723 __ clear_reg(Z_tos, true, false); // Ddon't set CC.
1724 __ z_cdbr(Z_ftos, Z_ftos);
1725 __ z_brno(done); // NaN -> 0
1726 if (bytecode() == Bytecodes::_d2i)
1727 __ z_cfdbr(Z_tos, Z_ftos, Assembler::to_zero);
1728 else // Bytecodes::_d2l
1729 __ z_cgdbr(Z_tos, Z_ftos, Assembler::to_zero);
1730 break;
1731 case Bytecodes::_d2f:
1732 __ move_freg_if_needed(Z_ftos, T_FLOAT, Z_ftos, T_DOUBLE);
1733 return;
1734 default:
1735 ShouldNotReachHere();
1736 }
1737 __ bind(done);
1738 }
1739
1740 void TemplateTable::lcmp() {
1741 transition(ltos, itos);
1742
1743 Label done;
1744 Register val1 = Z_R0_scratch;
1745 Register val2 = Z_R1_scratch;
1746
1747 if (VM_Version::has_LoadStoreConditional()) {
1748 __ pop_l(val1); // pop value 1.
1749 __ z_lghi(val2, -1); // lt value
1750 __ z_cgr(val1, Z_tos); // Compare with Z_tos (value 2). Protect CC under all circumstances.
1751 __ z_lghi(val1, 1); // gt value
1752 __ z_lghi(Z_tos, 0); // eq value
1753
1754 __ z_locgr(Z_tos, val1, Assembler::bcondHigh);
1755 __ z_locgr(Z_tos, val2, Assembler::bcondLow);
1756 } else {
1757 __ pop_l(val1); // Pop value 1.
1758 __ z_cgr(val1, Z_tos); // Compare with Z_tos (value 2). Protect CC under all circumstances.
1759
1760 __ z_lghi(Z_tos, 0); // eq value
1761 __ z_bre(done);
1762
1763 __ z_lghi(Z_tos, 1); // gt value
1764 __ z_brh(done);
1765
1766 __ z_lghi(Z_tos, -1); // lt value
1767 }
1768
1769 __ bind(done);
1770 }
1771
1772
1773 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1774 Label done;
1775
1776 if (is_float) {
1777 __ pop_f(Z_FARG2);
1778 __ z_cebr(Z_FARG2, Z_ftos);
1779 } else {
1780 __ pop_d(Z_FARG2);
1781 __ z_cdbr(Z_FARG2, Z_ftos);
1782 }
1783
1784 if (VM_Version::has_LoadStoreConditional()) {
1785 Register one = Z_R0_scratch;
1786 Register minus_one = Z_R1_scratch;
1787 __ z_lghi(minus_one, -1);
1788 __ z_lghi(one, 1);
1789 __ z_lghi(Z_tos, 0);
1790 __ z_locgr(Z_tos, one, unordered_result == 1 ? Assembler::bcondHighOrNotOrdered : Assembler::bcondHigh);
1791 __ z_locgr(Z_tos, minus_one, unordered_result == 1 ? Assembler::bcondLow : Assembler::bcondLowOrNotOrdered);
1792 } else {
1793 // Z_FARG2 == Z_ftos
1794 __ clear_reg(Z_tos, false, false);
1795 __ z_bre(done);
1796
1797 // F_ARG2 > Z_Ftos, or unordered
1798 __ z_lhi(Z_tos, 1);
1799 __ z_brc(unordered_result == 1 ? Assembler::bcondHighOrNotOrdered : Assembler::bcondHigh, done);
1800
1801 // F_ARG2 < Z_FTOS, or unordered
1802 __ z_lhi(Z_tos, -1);
1803
1804 __ bind(done);
1805 }
1806 }
1807
1808 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1809 const Register bumped_count = Z_tmp_1;
1810 const Register method = Z_tmp_2;
1811 const Register m_counters = Z_R1_scratch;
1812 const Register mdo = Z_tos;
1813
1814 BLOCK_COMMENT("TemplateTable::branch {");
1815 __ get_method(method);
1816 __ profile_taken_branch(mdo, bumped_count);
1817
1818 const ByteSize ctr_offset = InvocationCounter::counter_offset();
1819 const ByteSize be_offset = MethodCounters::backedge_counter_offset() + ctr_offset;
1820 const ByteSize inv_offset = MethodCounters::invocation_counter_offset() + ctr_offset;
1821
1822 // Get (wide) offset to disp.
1823 const Register disp = Z_ARG5;
1824 if (is_wide) {
1825 __ get_4_byte_integer_at_bcp(disp, 1);
1826 } else {
1827 __ get_2_byte_integer_at_bcp(disp, 1, InterpreterMacroAssembler::Signed);
1828 }
1829
1830 // Handle all the JSR stuff here, then exit.
1831 // It's much shorter and cleaner than intermingling with the
1832 // non-JSR normal-branch stuff occurring below.
1833 if (is_jsr) {
1834 // Compute return address as bci in Z_tos.
1835 __ z_lgr(Z_R1_scratch, Z_bcp);
1836 __ z_sg(Z_R1_scratch, Address(method, Method::const_offset()));
1837 __ add2reg(Z_tos, (is_wide ? 5 : 3) - in_bytes(ConstMethod::codes_offset()), Z_R1_scratch);
1838
1839 // Bump bcp to target of JSR.
1840 __ z_agr(Z_bcp, disp);
1841 // Push return address for "ret" on stack.
1842 __ push_ptr(Z_tos);
1843 // And away we go!
1844 __ dispatch_next(vtos);
1845 return;
1846 }
1847
1848 // Normal (non-jsr) branch handling.
1849
1850 // Bump bytecode pointer by displacement (take the branch).
1851 __ z_agr(Z_bcp, disp);
1852
1853 assert(UseLoopCounter || !UseOnStackReplacement,
1854 "on-stack-replacement requires loop counters");
1855
1856 NearLabel backedge_counter_overflow;
1857 NearLabel profile_method;
1858 NearLabel dispatch;
1859 int increment = InvocationCounter::count_increment;
1860
1861 if (UseLoopCounter) {
1862 // Increment backedge counter for backward branches.
1863 // disp: target offset
1864 // Z_bcp: target bcp
1865 // Z_locals: locals pointer
1866 //
1867 // Count only if backward branch.
1868 __ compare32_and_branch(disp, (intptr_t)0, Assembler::bcondHigh, dispatch);
1869
1870 if (TieredCompilation) {
1871 Label noCounters;
1872
1873 if (ProfileInterpreter) {
1874 NearLabel no_mdo;
1875
1876 // Are we profiling?
1877 __ load_and_test_long(mdo, Address(method, Method::method_data_offset()));
1878 __ branch_optimized(Assembler::bcondZero, no_mdo);
1879
1880 // Increment the MDO backedge counter.
1881 const Address mdo_backedge_counter(mdo, MethodData::backedge_counter_offset() + InvocationCounter::counter_offset());
1882
1883 const Address mask(mdo, MethodData::backedge_mask_offset());
1884 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
1885 Z_ARG2, false, Assembler::bcondZero,
1886 UseOnStackReplacement ? &backedge_counter_overflow : NULL);
1887 __ z_bru(dispatch);
1888 __ bind(no_mdo);
1889 }
1890
1891 // Increment backedge counter in MethodCounters*.
1892 __ get_method_counters(method, m_counters, noCounters);
1893 const Address mask(m_counters, MethodCounters::backedge_mask_offset());
1894 __ increment_mask_and_jump(Address(m_counters, be_offset),
1895 increment, mask,
1896 Z_ARG2, false, Assembler::bcondZero,
1897 UseOnStackReplacement ? &backedge_counter_overflow : NULL);
1898 __ bind(noCounters);
1899 } else {
1900 Register counter = Z_tos;
1901 Label noCounters;
1902 // Get address of MethodCounters object.
1903 __ get_method_counters(method, m_counters, noCounters);
1904 // Increment backedge counter.
1905 __ increment_backedge_counter(m_counters, counter);
1906
1907 if (ProfileInterpreter) {
1908 // Test to see if we should create a method data obj.
1909 __ z_cl(counter, Address(m_counters, MethodCounters::interpreter_profile_limit_offset()));
1910 __ z_brl(dispatch);
1911
1912 // If no method data exists, go to profile method.
1913 __ test_method_data_pointer(Z_ARG4/*result unused*/, profile_method);
1914
1915 if (UseOnStackReplacement) {
1916 // Check for overflow against 'bumped_count' which is the MDO taken count.
1917 __ z_cl(bumped_count, Address(m_counters, MethodCounters::interpreter_backward_branch_limit_offset()));
1918 __ z_brl(dispatch);
1919
1920 // When ProfileInterpreter is on, the backedge_count comes
1921 // from the methodDataOop, which value does not get reset on
1922 // the call to frequency_counter_overflow(). To avoid
1923 // excessive calls to the overflow routine while the method is
1924 // being compiled, add a second test to make sure the overflow
1925 // function is called only once every overflow_frequency.
1926 const int overflow_frequency = 1024;
1927 __ and_imm(bumped_count, overflow_frequency - 1);
1928 __ z_brz(backedge_counter_overflow);
1929
1930 }
1931 } else {
1932 if (UseOnStackReplacement) {
1933 // Check for overflow against 'counter', which is the sum of the
1934 // counters.
1935 __ z_cl(counter, Address(m_counters, MethodCounters::interpreter_backward_branch_limit_offset()));
1936 __ z_brh(backedge_counter_overflow);
1937 }
1938 }
1939 __ bind(noCounters);
1940 }
1941
1942 __ bind(dispatch);
1943 }
1944
1945 // Pre-load the next target bytecode into rbx.
1946 __ z_llgc(Z_bytecode, Address(Z_bcp, (intptr_t) 0));
1947
1948 // Continue with the bytecode @ target.
1949 // Z_tos: Return bci for jsr's, unused otherwise.
1950 // Z_bytecode: target bytecode
1951 // Z_bcp: target bcp
1952 __ dispatch_only(vtos);
1953
1954 // Out-of-line code runtime calls.
1955 if (UseLoopCounter) {
1956 if (ProfileInterpreter) {
1957 // Out-of-line code to allocate method data oop.
1958 __ bind(profile_method);
1959
1960 __ call_VM(noreg,
1961 CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1962 __ z_llgc(Z_bytecode, Address(Z_bcp, (intptr_t) 0)); // Restore target bytecode.
1963 __ set_method_data_pointer_for_bcp();
1964 __ z_bru(dispatch);
1965 }
1966
1967 if (UseOnStackReplacement) {
1968
1969 // invocation counter overflow
1970 __ bind(backedge_counter_overflow);
1971
1972 __ z_lcgr(Z_ARG2, disp); // Z_ARG2 := -disp
1973 __ z_agr(Z_ARG2, Z_bcp); // Z_ARG2 := branch target bcp - disp == branch bcp
1974 __ call_VM(noreg,
1975 CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow),
1976 Z_ARG2);
1977
1978 // Z_RET: osr nmethod (osr ok) or NULL (osr not possible).
1979 __ compare64_and_branch(Z_RET, (intptr_t) 0, Assembler::bcondEqual, dispatch);
1980
1981 // Nmethod may have been invalidated (VM may block upon call_VM return).
1982 __ z_cliy(nmethod::state_offset(), Z_RET, nmethod::in_use);
1983 __ z_brne(dispatch);
1984
1985 // Migrate the interpreter frame off of the stack.
1986
1987 __ z_lgr(Z_tmp_1, Z_RET); // Save the nmethod.
1988
1989 call_VM(noreg,
1990 CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
1991
1992 // Z_RET is OSR buffer, move it to expected parameter location.
1993 __ lgr_if_needed(Z_ARG1, Z_RET);
1994
1995 // Pop the interpreter frame ...
1996 __ pop_interpreter_frame(Z_R14, Z_ARG2/*tmp1*/, Z_ARG3/*tmp2*/);
1997
1998 // ... and begin the OSR nmethod.
1999 __ z_lg(Z_R1_scratch, Address(Z_tmp_1, nmethod::osr_entry_point_offset()));
2000 __ z_br(Z_R1_scratch);
2001 }
2002 }
2003 BLOCK_COMMENT("} TemplateTable::branch");
2004 }
2005
2006 void TemplateTable::if_0cmp(Condition cc) {
2007 transition(itos, vtos);
2008
2009 // Assume branch is more often taken than not (loops use backward branches).
2010 NearLabel not_taken;
2011 __ compare32_and_branch(Z_tos, (intptr_t) 0, j_not(cc), not_taken);
2012 branch(false, false);
2013 __ bind(not_taken);
2014 __ profile_not_taken_branch(Z_tos);
2015 }
2016
2017 void TemplateTable::if_icmp(Condition cc) {
2018 transition(itos, vtos);
2019
2020 // Assume branch is more often taken than not (loops use backward branches).
2021 NearLabel not_taken;
2022 __ pop_i(Z_R0_scratch);
2023 __ compare32_and_branch(Z_R0_scratch, Z_tos, j_not(cc), not_taken);
2024 branch(false, false);
2025 __ bind(not_taken);
2026 __ profile_not_taken_branch(Z_tos);
2027 }
2028
2029 void TemplateTable::if_nullcmp(Condition cc) {
2030 transition(atos, vtos);
2031
2032 // Assume branch is more often taken than not (loops use backward branches) .
2033 NearLabel not_taken;
2034 __ compare64_and_branch(Z_tos, (intptr_t) 0, j_not(cc), not_taken);
2035 branch(false, false);
2036 __ bind(not_taken);
2037 __ profile_not_taken_branch(Z_tos);
2038 }
2039
2040 void TemplateTable::if_acmp(Condition cc) {
2041 transition(atos, vtos);
2042 // Assume branch is more often taken than not (loops use backward branches).
2043 NearLabel not_taken;
2044 __ pop_ptr(Z_ARG2);
2045 __ verify_oop(Z_ARG2);
2046 __ verify_oop(Z_tos);
2047 __ compareU64_and_branch(Z_tos, Z_ARG2, j_not(cc), not_taken);
2048 branch(false, false);
2049 __ bind(not_taken);
2050 __ profile_not_taken_branch(Z_ARG3);
2051 }
2052
2053 void TemplateTable::ret() {
2054 transition(vtos, vtos);
2055
2056 locals_index(Z_tmp_1);
2057 // Get return bci, compute return bcp. Must load 64 bits.
2058 __ mem2reg_opt(Z_tmp_1, iaddress(_masm, Z_tmp_1));
2059 __ profile_ret(Z_tmp_1, Z_tmp_2);
2060 __ get_method(Z_tos);
2061 __ mem2reg_opt(Z_R1_scratch, Address(Z_tos, Method::const_offset()));
2062 __ load_address(Z_bcp, Address(Z_R1_scratch, Z_tmp_1, ConstMethod::codes_offset()));
2063 __ dispatch_next(vtos);
2064 }
2065
2066 void TemplateTable::wide_ret() {
2067 transition(vtos, vtos);
2068
2069 locals_index_wide(Z_tmp_1);
2070 // Get return bci, compute return bcp.
2071 __ mem2reg_opt(Z_tmp_1, aaddress(_masm, Z_tmp_1));
2072 __ profile_ret(Z_tmp_1, Z_tmp_2);
2073 __ get_method(Z_tos);
2074 __ mem2reg_opt(Z_R1_scratch, Address(Z_tos, Method::const_offset()));
2075 __ load_address(Z_bcp, Address(Z_R1_scratch, Z_tmp_1, ConstMethod::codes_offset()));
2076 __ dispatch_next(vtos);
2077 }
2078
2079 void TemplateTable::tableswitch () {
2080 transition(itos, vtos);
2081
2082 NearLabel default_case, continue_execution;
2083 Register bcp = Z_ARG5;
2084 // Align bcp.
2085 __ load_address(bcp, at_bcp(BytesPerInt));
2086 __ z_nill(bcp, (-BytesPerInt) & 0xffff);
2087
2088 // Load lo & hi.
2089 Register low = Z_tmp_1;
2090 Register high = Z_tmp_2;
2091
2092 // Load low into 64 bits, since used for address calculation.
2093 __ mem2reg_signed_opt(low, Address(bcp, BytesPerInt));
2094 __ mem2reg_opt(high, Address(bcp, 2 * BytesPerInt), false);
2095 // Sign extend "label" value for address calculation.
2096 __ z_lgfr(Z_tos, Z_tos);
2097
2098 // Check against lo & hi.
2099 __ compare32_and_branch(Z_tos, low, Assembler::bcondLow, default_case);
2100 __ compare32_and_branch(Z_tos, high, Assembler::bcondHigh, default_case);
2101
2102 // Lookup dispatch offset.
2103 __ z_sgr(Z_tos, low);
2104 Register jump_table_offset = Z_ARG3;
2105 // Index2offset; index in Z_tos is killed by profile_switch_case.
2106 __ z_sllg(jump_table_offset, Z_tos, LogBytesPerInt);
2107 __ profile_switch_case(Z_tos, Z_ARG4 /*tmp for mdp*/, low/*tmp*/, Z_bytecode/*tmp*/);
2108
2109 Register index = Z_tmp_2;
2110
2111 // Load index sign extended for addressing.
2112 __ mem2reg_signed_opt(index, Address(bcp, jump_table_offset, 3 * BytesPerInt));
2113
2114 // Continue execution.
2115 __ bind(continue_execution);
2116
2117 // Load next bytecode.
2118 __ z_llgc(Z_bytecode, Address(Z_bcp, index));
2119 __ z_agr(Z_bcp, index); // Advance bcp.
2120 __ dispatch_only(vtos);
2121
2122 // Handle default.
2123 __ bind(default_case);
2124
2125 __ profile_switch_default(Z_tos);
2126 __ mem2reg_signed_opt(index, Address(bcp));
2127 __ z_bru(continue_execution);
2128 }
2129
2130 void TemplateTable::lookupswitch () {
2131 transition(itos, itos);
2132 __ stop("lookupswitch bytecode should have been rewritten");
2133 }
2134
2135 void TemplateTable::fast_linearswitch () {
2136 transition(itos, vtos);
2137
2138 Label loop_entry, loop, found, continue_execution;
2139 Register bcp = Z_ARG5;
2140
2141 // Align bcp.
2142 __ load_address(bcp, at_bcp(BytesPerInt));
2143 __ z_nill(bcp, (-BytesPerInt) & 0xffff);
2144
2145 // Start search with last case.
2146 Register current_case_offset = Z_tmp_1;
2147
2148 __ mem2reg_signed_opt(current_case_offset, Address(bcp, BytesPerInt));
2149 __ z_sllg(current_case_offset, current_case_offset, LogBytesPerWord); // index2bytes
2150 __ z_bru(loop_entry);
2151
2152 // table search
2153 __ bind(loop);
2154
2155 __ z_c(Z_tos, Address(bcp, current_case_offset, 2 * BytesPerInt));
2156 __ z_bre(found);
2157
2158 __ bind(loop_entry);
2159 __ z_aghi(current_case_offset, -2 * BytesPerInt); // Decrement.
2160 __ z_brnl(loop);
2161
2162 // default case
2163 Register offset = Z_tmp_2;
2164
2165 __ profile_switch_default(Z_tos);
2166 // Load offset sign extended for addressing.
2167 __ mem2reg_signed_opt(offset, Address(bcp));
2168 __ z_bru(continue_execution);
2169
2170 // Entry found -> get offset.
2171 __ bind(found);
2172 __ mem2reg_signed_opt(offset, Address(bcp, current_case_offset, 3 * BytesPerInt));
2173 // Profile that this case was taken.
2174 Register current_case_idx = Z_ARG4;
2175 __ z_srlg(current_case_idx, current_case_offset, LogBytesPerWord); // bytes2index
2176 __ profile_switch_case(current_case_idx, Z_tos, bcp, Z_bytecode);
2177
2178 // Continue execution.
2179 __ bind(continue_execution);
2180
2181 // Load next bytecode.
2182 __ z_llgc(Z_bytecode, Address(Z_bcp, offset, 0));
2183 __ z_agr(Z_bcp, offset); // Advance bcp.
2184 __ dispatch_only(vtos);
2185 }
2186
2187
2188 void TemplateTable::fast_binaryswitch() {
2189
2190 transition(itos, vtos);
2191
2192 // Implementation using the following core algorithm:
2193 //
2194 // int binary_search(int key, LookupswitchPair* array, int n) {
2195 // // Binary search according to "Methodik des Programmierens" by
2196 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2197 // int i = 0;
2198 // int j = n;
2199 // while (i+1 < j) {
2200 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2201 // // with Q: for all i: 0 <= i < n: key < a[i]
2202 // // where a stands for the array and assuming that the (inexisting)
2203 // // element a[n] is infinitely big.
2204 // int h = (i + j) >> 1;
2205 // // i < h < j
2206 // if (key < array[h].fast_match()) {
2207 // j = h;
2208 // } else {
2209 // i = h;
2210 // }
2211 // }
2212 // // R: a[i] <= key < a[i+1] or Q
2213 // // (i.e., if key is within array, i is the correct index)
2214 // return i;
2215 // }
2216
2217 // Register allocation
2218 // Note: Since we use the indices in address operands, we do all the
2219 // computation in 64 bits.
2220 const Register key = Z_tos; // Already set (tosca).
2221 const Register array = Z_tmp_1;
2222 const Register i = Z_tmp_2;
2223 const Register j = Z_ARG5;
2224 const Register h = Z_ARG4;
2225 const Register temp = Z_R1_scratch;
2226
2227 // Find array start.
2228 __ load_address(array, at_bcp(3 * BytesPerInt));
2229 __ z_nill(array, (-BytesPerInt) & 0xffff); // align
2230
2231 // Initialize i & j.
2232 __ clear_reg(i, true, false); // i = 0; Don't set CC.
2233 __ mem2reg_signed_opt(j, Address(array, -BytesPerInt)); // j = length(array);
2234
2235 // And start.
2236 Label entry;
2237 __ z_bru(entry);
2238
2239 // binary search loop
2240 {
2241 NearLabel loop;
2242
2243 __ bind(loop);
2244
2245 // int h = (i + j) >> 1;
2246 __ add2reg_with_index(h, 0, i, j); // h = i + j;
2247 __ z_srag(h, h, 1); // h = (i + j) >> 1;
2248
2249 // if (key < array[h].fast_match()) {
2250 // j = h;
2251 // } else {
2252 // i = h;
2253 // }
2254
2255 // Convert array[h].match to native byte-ordering before compare.
2256 __ z_sllg(temp, h, LogBytesPerWord); // index2bytes
2257 __ mem2reg_opt(temp, Address(array, temp), false);
2258
2259 NearLabel else_;
2260
2261 __ compare32_and_branch(key, temp, Assembler::bcondNotLow, else_);
2262 // j = h if (key < array[h].fast_match())
2263 __ z_lgr(j, h);
2264 __ z_bru(entry); // continue
2265
2266 __ bind(else_);
2267
2268 // i = h if (key >= array[h].fast_match())
2269 __ z_lgr(i, h); // and fallthrough
2270
2271 // while (i+1 < j)
2272 __ bind(entry);
2273
2274 // if (i + 1 < j) continue search
2275 __ add2reg(h, 1, i);
2276 __ compare64_and_branch(h, j, Assembler::bcondLow, loop);
2277 }
2278
2279 // End of binary search, result index is i (must check again!).
2280 NearLabel default_case;
2281
2282 // h is no longer needed, so use it to hold the byte offset.
2283 __ z_sllg(h, i, LogBytesPerWord); // index2bytes
2284 __ mem2reg_opt(temp, Address(array, h), false);
2285 __ compare32_and_branch(key, temp, Assembler::bcondNotEqual, default_case);
2286
2287 // entry found -> j = offset
2288 __ mem2reg_signed_opt(j, Address(array, h, BytesPerInt));
2289 __ profile_switch_case(i, key, array, Z_bytecode);
2290 // Load next bytecode.
2291 __ z_llgc(Z_bytecode, Address(Z_bcp, j));
2292 __ z_agr(Z_bcp, j); // Advance bcp.
2293 __ dispatch_only(vtos);
2294
2295 // default case -> j = default offset
2296 __ bind(default_case);
2297
2298 __ profile_switch_default(i);
2299 __ mem2reg_signed_opt(j, Address(array, -2 * BytesPerInt));
2300 // Load next bytecode.
2301 __ z_llgc(Z_bytecode, Address(Z_bcp, j));
2302 __ z_agr(Z_bcp, j); // Advance bcp.
2303 __ dispatch_only(vtos);
2304 }
2305
2306 void TemplateTable::_return(TosState state) {
2307 transition(state, state);
2308 assert(_desc->calls_vm(),
2309 "inconsistent calls_vm information"); // call in remove_activation
2310
2311 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2312 Register Rthis = Z_ARG2;
2313 Register Rklass = Z_ARG5;
2314 Label skip_register_finalizer;
2315 assert(state == vtos, "only valid state");
2316 __ z_lg(Rthis, aaddress(0));
2317 __ load_klass(Rklass, Rthis);
2318 __ testbit(Address(Rklass, Klass::access_flags_offset()), exact_log2(JVM_ACC_HAS_FINALIZER));
2319 __ z_bfalse(skip_register_finalizer);
2320 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Rthis);
2321 __ bind(skip_register_finalizer);
2322 }
2323
2324 __ remove_activation(state, Z_R14);
2325 __ z_br(Z_R14);
2326 }
2327
2328 // ----------------------------------------------------------------------------
2329 // NOTE: Cpe_offset is already computed as byte offset, so we must not
2330 // shift it afterwards!
2331 void TemplateTable::resolve_cache_and_index(int byte_no,
2332 Register Rcache,
2333 Register cpe_offset,
2334 size_t index_size) {
2335 BLOCK_COMMENT("resolve_cache_and_index {");
2336 NearLabel resolved;
2337 const Register bytecode_in_cpcache = Z_R1_scratch;
2338 const int total_f1_offset = in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f1_offset());
2339 assert_different_registers(Rcache, cpe_offset, bytecode_in_cpcache);
2340
2341 Bytecodes::Code code = bytecode();
2342 switch (code) {
2343 case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2344 case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2345 }
2346
2347 {
2348 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2349 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, cpe_offset, bytecode_in_cpcache, byte_no, 1, index_size);
2350 // Have we resolved this bytecode?
2351 __ compare32_and_branch(bytecode_in_cpcache, (int)code, Assembler::bcondEqual, resolved);
2352 }
2353
2354 // Resolve first time through.
2355 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2356 __ load_const_optimized(Z_ARG2, (int) code);
2357 __ call_VM(noreg, entry, Z_ARG2);
2358
2359 // Update registers with resolved info.
2360 __ get_cache_and_index_at_bcp(Rcache, cpe_offset, 1, index_size);
2361 __ bind(resolved);
2362 BLOCK_COMMENT("} resolve_cache_and_index");
2363 }
2364
2365 // The Rcache and index registers must be set before call.
2366 // Index is already a byte offset, don't shift!
2367 void TemplateTable::load_field_cp_cache_entry(Register obj,
2368 Register cache,
2369 Register index,
2370 Register off,
2371 Register flags,
2372 bool is_static = false) {
2373 assert_different_registers(cache, index, flags, off);
2374 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2375
2376 // Field offset
2377 __ mem2reg_opt(off, Address(cache, index, cp_base_offset + ConstantPoolCacheEntry::f2_offset()));
2378 // Flags. Must load 64 bits.
2379 __ mem2reg_opt(flags, Address(cache, index, cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
2380
2381 // klass overwrite register
2382 if (is_static) {
2383 __ mem2reg_opt(obj, Address(cache, index, cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
2384 __ mem2reg_opt(obj, Address(obj, Klass::java_mirror_offset()));
2385 __ resolve_oop_handle(obj);
2386 }
2387 }
2388
2389 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2390 Register method,
2391 Register itable_index,
2392 Register flags,
2393 bool is_invokevirtual,
2394 bool is_invokevfinal, // unused
2395 bool is_invokedynamic) {
2396 BLOCK_COMMENT("load_invoke_cp_cache_entry {");
2397 // Setup registers.
2398 const Register cache = Z_ARG1;
2399 const Register cpe_offset= flags;
2400 const ByteSize base_off = ConstantPoolCache::base_offset();
2401 const ByteSize f1_off = ConstantPoolCacheEntry::f1_offset();
2402 const ByteSize f2_off = ConstantPoolCacheEntry::f2_offset();
2403 const ByteSize flags_off = ConstantPoolCacheEntry::flags_offset();
2404 const int method_offset = in_bytes(base_off + ((byte_no == f2_byte) ? f2_off : f1_off));
2405 const int flags_offset = in_bytes(base_off + flags_off);
2406 // Access constant pool cache fields.
2407 const int index_offset = in_bytes(base_off + f2_off);
2408
2409 assert_different_registers(method, itable_index, flags, cache);
2410 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2411
2412 if (is_invokevfinal) {
2413 // Already resolved.
2414 assert(itable_index == noreg, "register not used");
2415 __ get_cache_and_index_at_bcp(cache, cpe_offset, 1);
2416 } else {
2417 // Need to resolve.
2418 resolve_cache_and_index(byte_no, cache, cpe_offset, is_invokedynamic ? sizeof(u4) : sizeof(u2));
2419 }
2420 __ z_lg(method, Address(cache, cpe_offset, method_offset));
2421
2422 if (itable_index != noreg) {
2423 __ z_lg(itable_index, Address(cache, cpe_offset, index_offset));
2424 }
2425
2426 // Only load the lower 4 bytes and fill high bytes of flags with zeros.
2427 // Callers depend on this zero-extension!!!
2428 // Attention: overwrites cpe_offset == flags
2429 __ z_llgf(flags, Address(cache, cpe_offset, flags_offset + (BytesPerLong-BytesPerInt)));
2430
2431 BLOCK_COMMENT("} load_invoke_cp_cache_entry");
2432 }
2433
2434 // The registers cache and index expected to be set before call.
2435 // Correct values of the cache and index registers are preserved.
2436 void TemplateTable::jvmti_post_field_access(Register cache, Register index,
2437 bool is_static, bool has_tos) {
2438
2439 // Do the JVMTI work here to avoid disturbing the register state below.
2440 // We use c_rarg registers here because we want to use the register used in
2441 // the call to the VM
2442 if (!JvmtiExport::can_post_field_access()) {
2443 return;
2444 }
2445
2446 // Check to see if a field access watch has been set before we
2447 // take the time to call into the VM.
2448 Label exit;
2449 assert_different_registers(cache, index, Z_tos);
2450 __ load_absolute_address(Z_tos, (address)JvmtiExport::get_field_access_count_addr());
2451 __ load_and_test_int(Z_R0, Address(Z_tos));
2452 __ z_brz(exit);
2453
2454 // Index is returned as byte offset, do not shift!
2455 __ get_cache_and_index_at_bcp(Z_ARG3, Z_R1_scratch, 1);
2456
2457 // cache entry pointer
2458 __ add2reg_with_index(Z_ARG3,
2459 in_bytes(ConstantPoolCache::base_offset()),
2460 Z_ARG3, Z_R1_scratch);
2461
2462 if (is_static) {
2463 __ clear_reg(Z_ARG2, true, false); // NULL object reference. Don't set CC.
2464 } else {
2465 __ mem2reg_opt(Z_ARG2, at_tos()); // Get object pointer without popping it.
2466 __ verify_oop(Z_ARG2);
2467 }
2468 // Z_ARG2: object pointer or NULL
2469 // Z_ARG3: cache entry pointer
2470 __ call_VM(noreg,
2471 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2472 Z_ARG2, Z_ARG3);
2473 __ get_cache_and_index_at_bcp(cache, index, 1);
2474
2475 __ bind(exit);
2476 }
2477
2478 void TemplateTable::pop_and_check_object(Register r) {
2479 __ pop_ptr(r);
2480 __ null_check(r); // for field access must check obj.
2481 __ verify_oop(r);
2482 }
2483
2484 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2485 transition(vtos, vtos);
2486
2487 const Register cache = Z_tmp_1;
2488 const Register index = Z_tmp_2;
2489 const Register obj = Z_tmp_1;
2490 const Register off = Z_ARG2;
2491 const Register flags = Z_ARG1;
2492 const Register bc = Z_tmp_1; // Uses same reg as obj, so don't mix them.
2493
2494 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2495 jvmti_post_field_access(cache, index, is_static, false);
2496 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2497
2498 if (!is_static) {
2499 // Obj is on the stack.
2500 pop_and_check_object(obj);
2501 }
2502
2503 // Displacement is 0, so any store instruction will be fine on any CPU.
2504 const Address field(obj, off);
2505
2506 Label is_Byte, is_Bool, is_Int, is_Short, is_Char,
2507 is_Long, is_Float, is_Object, is_Double;
2508 Label is_badState8, is_badState9, is_badStateA, is_badStateB,
2509 is_badStateC, is_badStateD, is_badStateE, is_badStateF,
2510 is_badState;
2511 Label branchTable, atosHandler, Done;
2512 Register br_tab = Z_R1_scratch;
2513 bool do_rewrite = !is_static && (rc == may_rewrite);
2514 bool dont_rewrite = (is_static || (rc == may_not_rewrite));
2515
2516 assert(do_rewrite == !dont_rewrite, "Oops, code is not fit for that");
2517 assert(btos == 0, "change code, btos != 0");
2518
2519 // Calculate branch table size. Generated code size depends on ASSERT and on bytecode rewriting.
2520 #ifdef ASSERT
2521 const unsigned int bsize = dont_rewrite ? BTB_MINSIZE*1 : BTB_MINSIZE*4;
2522 #else
2523 const unsigned int bsize = dont_rewrite ? BTB_MINSIZE*1 : BTB_MINSIZE*4;
2524 #endif
2525
2526 // Calculate address of branch table entry and branch there.
2527 {
2528 const int bit_shift = exact_log2(bsize); // Size of each branch table entry.
2529 const int r_bitpos = 63 - bit_shift;
2530 const int l_bitpos = r_bitpos - ConstantPoolCacheEntry::tos_state_bits + 1;
2531 const int n_rotate = (bit_shift-ConstantPoolCacheEntry::tos_state_shift);
2532 __ z_larl(br_tab, branchTable);
2533 __ rotate_then_insert(flags, flags, l_bitpos, r_bitpos, n_rotate, true);
2534 }
2535 __ z_bc(Assembler::bcondAlways, 0, flags, br_tab);
2536
2537 __ align_address(bsize);
2538 BIND(branchTable);
2539
2540 // btos
2541 BTB_BEGIN(is_Byte, bsize, "getfield_or_static:is_Byte");
2542 __ z_lb(Z_tos, field);
2543 __ push(btos);
2544 // Rewrite bytecode to be faster.
2545 if (do_rewrite) {
2546 patch_bytecode(Bytecodes::_fast_bgetfield, bc, Z_ARG5);
2547 }
2548 __ z_bru(Done);
2549 BTB_END(is_Byte, bsize, "getfield_or_static:is_Byte");
2550
2551 // ztos
2552 BTB_BEGIN(is_Bool, bsize, "getfield_or_static:is_Bool");
2553 __ z_lb(Z_tos, field);
2554 __ push(ztos);
2555 // Rewrite bytecode to be faster.
2556 if (do_rewrite) {
2557 // Use btos rewriting, no truncating to t/f bit is needed for getfield.
2558 patch_bytecode(Bytecodes::_fast_bgetfield, bc, Z_ARG5);
2559 }
2560 __ z_bru(Done);
2561 BTB_END(is_Bool, bsize, "getfield_or_static:is_Bool");
2562
2563 // ctos
2564 BTB_BEGIN(is_Char, bsize, "getfield_or_static:is_Char");
2565 // Load into 64 bits, works on all CPUs.
2566 __ z_llgh(Z_tos, field);
2567 __ push(ctos);
2568 // Rewrite bytecode to be faster.
2569 if (do_rewrite) {
2570 patch_bytecode(Bytecodes::_fast_cgetfield, bc, Z_ARG5);
2571 }
2572 __ z_bru(Done);
2573 BTB_END(is_Char, bsize, "getfield_or_static:is_Char");
2574
2575 // stos
2576 BTB_BEGIN(is_Short, bsize, "getfield_or_static:is_Short");
2577 __ z_lh(Z_tos, field);
2578 __ push(stos);
2579 // Rewrite bytecode to be faster.
2580 if (do_rewrite) {
2581 patch_bytecode(Bytecodes::_fast_sgetfield, bc, Z_ARG5);
2582 }
2583 __ z_bru(Done);
2584 BTB_END(is_Short, bsize, "getfield_or_static:is_Short");
2585
2586 // itos
2587 BTB_BEGIN(is_Int, bsize, "getfield_or_static:is_Int");
2588 __ mem2reg_opt(Z_tos, field, false);
2589 __ push(itos);
2590 // Rewrite bytecode to be faster.
2591 if (do_rewrite) {
2592 patch_bytecode(Bytecodes::_fast_igetfield, bc, Z_ARG5);
2593 }
2594 __ z_bru(Done);
2595 BTB_END(is_Int, bsize, "getfield_or_static:is_Int");
2596
2597 // ltos
2598 BTB_BEGIN(is_Long, bsize, "getfield_or_static:is_Long");
2599 __ mem2reg_opt(Z_tos, field);
2600 __ push(ltos);
2601 // Rewrite bytecode to be faster.
2602 if (do_rewrite) {
2603 patch_bytecode(Bytecodes::_fast_lgetfield, bc, Z_ARG5);
2604 }
2605 __ z_bru(Done);
2606 BTB_END(is_Long, bsize, "getfield_or_static:is_Long");
2607
2608 // ftos
2609 BTB_BEGIN(is_Float, bsize, "getfield_or_static:is_Float");
2610 __ mem2freg_opt(Z_ftos, field, false);
2611 __ push(ftos);
2612 // Rewrite bytecode to be faster.
2613 if (do_rewrite) {
2614 patch_bytecode(Bytecodes::_fast_fgetfield, bc, Z_ARG5);
2615 }
2616 __ z_bru(Done);
2617 BTB_END(is_Float, bsize, "getfield_or_static:is_Float");
2618
2619 // dtos
2620 BTB_BEGIN(is_Double, bsize, "getfield_or_static:is_Double");
2621 __ mem2freg_opt(Z_ftos, field);
2622 __ push(dtos);
2623 // Rewrite bytecode to be faster.
2624 if (do_rewrite) {
2625 patch_bytecode(Bytecodes::_fast_dgetfield, bc, Z_ARG5);
2626 }
2627 __ z_bru(Done);
2628 BTB_END(is_Double, bsize, "getfield_or_static:is_Double");
2629
2630 // atos
2631 BTB_BEGIN(is_Object, bsize, "getfield_or_static:is_Object");
2632 __ z_bru(atosHandler);
2633 BTB_END(is_Object, bsize, "getfield_or_static:is_Object");
2634
2635 // Bad state detection comes at no extra runtime cost.
2636 BTB_BEGIN(is_badState8, bsize, "getfield_or_static:is_badState8");
2637 __ z_illtrap();
2638 __ z_bru(is_badState);
2639 BTB_END( is_badState8, bsize, "getfield_or_static:is_badState8");
2640 BTB_BEGIN(is_badState9, bsize, "getfield_or_static:is_badState9");
2641 __ z_illtrap();
2642 __ z_bru(is_badState);
2643 BTB_END( is_badState9, bsize, "getfield_or_static:is_badState9");
2644 BTB_BEGIN(is_badStateA, bsize, "getfield_or_static:is_badStateA");
2645 __ z_illtrap();
2646 __ z_bru(is_badState);
2647 BTB_END( is_badStateA, bsize, "getfield_or_static:is_badStateA");
2648 BTB_BEGIN(is_badStateB, bsize, "getfield_or_static:is_badStateB");
2649 __ z_illtrap();
2650 __ z_bru(is_badState);
2651 BTB_END( is_badStateB, bsize, "getfield_or_static:is_badStateB");
2652 BTB_BEGIN(is_badStateC, bsize, "getfield_or_static:is_badStateC");
2653 __ z_illtrap();
2654 __ z_bru(is_badState);
2655 BTB_END( is_badStateC, bsize, "getfield_or_static:is_badStateC");
2656 BTB_BEGIN(is_badStateD, bsize, "getfield_or_static:is_badStateD");
2657 __ z_illtrap();
2658 __ z_bru(is_badState);
2659 BTB_END( is_badStateD, bsize, "getfield_or_static:is_badStateD");
2660 BTB_BEGIN(is_badStateE, bsize, "getfield_or_static:is_badStateE");
2661 __ z_illtrap();
2662 __ z_bru(is_badState);
2663 BTB_END( is_badStateE, bsize, "getfield_or_static:is_badStateE");
2664 BTB_BEGIN(is_badStateF, bsize, "getfield_or_static:is_badStateF");
2665 __ z_illtrap();
2666 __ z_bru(is_badState);
2667 BTB_END( is_badStateF, bsize, "getfield_or_static:is_badStateF");
2668
2669 __ align_address(64);
2670 BIND(is_badState); // Do this outside branch table. Needs a lot of space.
2671 {
2672 unsigned int b_off = __ offset();
2673 if (is_static) {
2674 __ stop_static("Bad state in getstatic");
2675 } else {
2676 __ stop_static("Bad state in getfield");
2677 }
2678 unsigned int e_off = __ offset();
2679 }
2680
2681 __ align_address(64);
2682 BIND(atosHandler); // Oops are really complicated to handle.
2683 // There is a lot of code generated.
2684 // Therefore: generate the handler outside of branch table.
2685 // There is no performance penalty. The additional branch
2686 // to here is compensated for by the fallthru to "Done".
2687 {
2688 unsigned int b_off = __ offset();
2689 __ load_heap_oop(Z_tos, field);
2690 __ verify_oop(Z_tos);
2691 __ push(atos);
2692 if (do_rewrite) {
2693 patch_bytecode(Bytecodes::_fast_agetfield, bc, Z_ARG5);
2694 }
2695 unsigned int e_off = __ offset();
2696 }
2697
2698 BIND(Done);
2699 }
2700
2701 void TemplateTable::getfield(int byte_no) {
2702 BLOCK_COMMENT("getfield {");
2703 getfield_or_static(byte_no, false);
2704 BLOCK_COMMENT("} getfield");
2705 }
2706
2707 void TemplateTable::nofast_getfield(int byte_no) {
2708 getfield_or_static(byte_no, false, may_not_rewrite);
2709 }
2710
2711 void TemplateTable::getstatic(int byte_no) {
2712 BLOCK_COMMENT("getstatic {");
2713 getfield_or_static(byte_no, true);
2714 BLOCK_COMMENT("} getstatic");
2715 }
2716
2717 // The registers cache and index expected to be set before call. The
2718 // function may destroy various registers, just not the cache and
2719 // index registers.
2720 void TemplateTable::jvmti_post_field_mod(Register cache,
2721 Register index, bool is_static) {
2722 transition(vtos, vtos);
2723
2724 if (!JvmtiExport::can_post_field_modification()) {
2725 return;
2726 }
2727
2728 BLOCK_COMMENT("jvmti_post_field_mod {");
2729
2730 // Check to see if a field modification watch has been set before
2731 // we take the time to call into the VM.
2732 Label L1;
2733 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2734 assert_different_registers(cache, index, Z_tos);
2735
2736 __ load_absolute_address(Z_tos, (address)JvmtiExport::get_field_modification_count_addr());
2737 __ load_and_test_int(Z_R0, Address(Z_tos));
2738 __ z_brz(L1);
2739
2740 // Index is returned as byte offset, do not shift!
2741 __ get_cache_and_index_at_bcp(Z_ARG3, Z_R1_scratch, 1);
2742
2743 if (is_static) {
2744 // Life is simple. Null out the object pointer.
2745 __ clear_reg(Z_ARG2, true, false); // Don't set CC.
2746 } else {
2747 // Life is harder. The stack holds the value on top, followed by
2748 // the object. We don't know the size of the value, though. It
2749 // could be one or two words depending on its type. As a result,
2750 // we must find the type to determine where the object is.
2751 __ mem2reg_opt(Z_ARG4,
2752 Address(Z_ARG3, Z_R1_scratch,
2753 in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()) +
2754 (BytesPerLong - BytesPerInt)),
2755 false);
2756 __ z_srl(Z_ARG4, ConstantPoolCacheEntry::tos_state_shift);
2757 // Make sure we don't need to mask Z_ARG4 for tos_state after the above shift.
2758 ConstantPoolCacheEntry::verify_tos_state_shift();
2759 __ mem2reg_opt(Z_ARG2, at_tos(1)); // Initially assume a one word jvalue.
2760
2761 NearLabel load_dtos, cont;
2762
2763 __ compareU32_and_branch(Z_ARG4, (intptr_t) ltos,
2764 Assembler::bcondNotEqual, load_dtos);
2765 __ mem2reg_opt(Z_ARG2, at_tos(2)); // ltos (two word jvalue)
2766 __ z_bru(cont);
2767
2768 __ bind(load_dtos);
2769 __ compareU32_and_branch(Z_ARG4, (intptr_t)dtos, Assembler::bcondNotEqual, cont);
2770 __ mem2reg_opt(Z_ARG2, at_tos(2)); // dtos (two word jvalue)
2771
2772 __ bind(cont);
2773 }
2774 // cache entry pointer
2775
2776 __ add2reg_with_index(Z_ARG3, in_bytes(cp_base_offset), Z_ARG3, Z_R1_scratch);
2777
2778 // object(tos)
2779 __ load_address(Z_ARG4, Address(Z_esp, Interpreter::stackElementSize));
2780 // Z_ARG2: object pointer set up above (NULL if static)
2781 // Z_ARG3: cache entry pointer
2782 // Z_ARG4: jvalue object on the stack
2783 __ call_VM(noreg,
2784 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2785 Z_ARG2, Z_ARG3, Z_ARG4);
2786 __ get_cache_and_index_at_bcp(cache, index, 1);
2787
2788 __ bind(L1);
2789 BLOCK_COMMENT("} jvmti_post_field_mod");
2790 }
2791
2792
2793 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2794 transition(vtos, vtos);
2795
2796 const Register cache = Z_tmp_1;
2797 const Register index = Z_ARG5;
2798 const Register obj = Z_tmp_1;
2799 const Register off = Z_tmp_2;
2800 const Register flags = Z_R1_scratch;
2801 const Register br_tab = Z_ARG5;
2802 const Register bc = Z_tmp_1;
2803 const Register oopStore_tmp1 = Z_R1_scratch;
2804 const Register oopStore_tmp2 = Z_ARG5;
2805 const Register oopStore_tmp3 = Z_R0_scratch;
2806
2807 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2808 jvmti_post_field_mod(cache, index, is_static);
2809 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2810 // begin of life for:
2811 // obj, off long life range
2812 // flags short life range, up to branch into branch table
2813 // end of life for:
2814 // cache, index
2815
2816 const Address field(obj, off);
2817 Label is_Byte, is_Bool, is_Int, is_Short, is_Char,
2818 is_Long, is_Float, is_Object, is_Double;
2819 Label is_badState8, is_badState9, is_badStateA, is_badStateB,
2820 is_badStateC, is_badStateD, is_badStateE, is_badStateF,
2821 is_badState;
2822 Label branchTable, atosHandler, Done;
2823 bool do_rewrite = !is_static && (rc == may_rewrite);
2824 bool dont_rewrite = (is_static || (rc == may_not_rewrite));
2825
2826 assert(do_rewrite == !dont_rewrite, "Oops, code is not fit for that");
2827
2828 assert(btos == 0, "change code, btos != 0");
2829
2830 #ifdef ASSERT
2831 const unsigned int bsize = is_static ? BTB_MINSIZE*1 : BTB_MINSIZE*4;
2832 #else
2833 const unsigned int bsize = is_static ? BTB_MINSIZE*1 : BTB_MINSIZE*8;
2834 #endif
2835
2836 // Calculate address of branch table entry and branch there.
2837 {
2838 const int bit_shift = exact_log2(bsize); // Size of each branch table entry.
2839 const int r_bitpos = 63 - bit_shift;
2840 const int l_bitpos = r_bitpos - ConstantPoolCacheEntry::tos_state_bits + 1;
2841 const int n_rotate = (bit_shift-ConstantPoolCacheEntry::tos_state_shift);
2842 __ z_larl(br_tab, branchTable);
2843 __ rotate_then_insert(flags, flags, l_bitpos, r_bitpos, n_rotate, true);
2844 __ z_bc(Assembler::bcondAlways, 0, flags, br_tab);
2845 }
2846 // end of life for:
2847 // flags, br_tab
2848
2849 __ align_address(bsize);
2850 BIND(branchTable);
2851
2852 // btos
2853 BTB_BEGIN(is_Byte, bsize, "putfield_or_static:is_Byte");
2854 __ pop(btos);
2855 if (!is_static) {
2856 pop_and_check_object(obj);
2857 }
2858 __ z_stc(Z_tos, field);
2859 if (do_rewrite) {
2860 patch_bytecode(Bytecodes::_fast_bputfield, bc, Z_ARG5, true, byte_no);
2861 }
2862 __ z_bru(Done);
2863 BTB_END( is_Byte, bsize, "putfield_or_static:is_Byte");
2864
2865 // ztos
2866 BTB_BEGIN(is_Bool, bsize, "putfield_or_static:is_Bool");
2867 __ pop(ztos);
2868 if (do_rewrite) {
2869 pop_and_check_object(obj);
2870 }
2871 __ z_nilf(Z_tos, 0x1);
2872 __ z_stc(Z_tos, field);
2873 if (!is_static) {
2874 patch_bytecode(Bytecodes::_fast_zputfield, bc, Z_ARG5, true, byte_no);
2875 }
2876 __ z_bru(Done);
2877 BTB_END(is_Bool, bsize, "putfield_or_static:is_Bool");
2878
2879 // ctos
2880 BTB_BEGIN(is_Char, bsize, "putfield_or_static:is_Char");
2881 __ pop(ctos);
2882 if (!is_static) {
2883 pop_and_check_object(obj);
2884 }
2885 __ z_sth(Z_tos, field);
2886 if (do_rewrite) {
2887 patch_bytecode(Bytecodes::_fast_cputfield, bc, Z_ARG5, true, byte_no);
2888 }
2889 __ z_bru(Done);
2890 BTB_END( is_Char, bsize, "putfield_or_static:is_Char");
2891
2892 // stos
2893 BTB_BEGIN(is_Short, bsize, "putfield_or_static:is_Short");
2894 __ pop(stos);
2895 if (!is_static) {
2896 pop_and_check_object(obj);
2897 }
2898 __ z_sth(Z_tos, field);
2899 if (do_rewrite) {
2900 patch_bytecode(Bytecodes::_fast_sputfield, bc, Z_ARG5, true, byte_no);
2901 }
2902 __ z_bru(Done);
2903 BTB_END( is_Short, bsize, "putfield_or_static:is_Short");
2904
2905 // itos
2906 BTB_BEGIN(is_Int, bsize, "putfield_or_static:is_Int");
2907 __ pop(itos);
2908 if (!is_static) {
2909 pop_and_check_object(obj);
2910 }
2911 __ reg2mem_opt(Z_tos, field, false);
2912 if (do_rewrite) {
2913 patch_bytecode(Bytecodes::_fast_iputfield, bc, Z_ARG5, true, byte_no);
2914 }
2915 __ z_bru(Done);
2916 BTB_END( is_Int, bsize, "putfield_or_static:is_Int");
2917
2918 // ltos
2919 BTB_BEGIN(is_Long, bsize, "putfield_or_static:is_Long");
2920 __ pop(ltos);
2921 if (!is_static) {
2922 pop_and_check_object(obj);
2923 }
2924 __ reg2mem_opt(Z_tos, field);
2925 if (do_rewrite) {
2926 patch_bytecode(Bytecodes::_fast_lputfield, bc, Z_ARG5, true, byte_no);
2927 }
2928 __ z_bru(Done);
2929 BTB_END( is_Long, bsize, "putfield_or_static:is_Long");
2930
2931 // ftos
2932 BTB_BEGIN(is_Float, bsize, "putfield_or_static:is_Float");
2933 __ pop(ftos);
2934 if (!is_static) {
2935 pop_and_check_object(obj);
2936 }
2937 __ freg2mem_opt(Z_ftos, field, false);
2938 if (do_rewrite) {
2939 patch_bytecode(Bytecodes::_fast_fputfield, bc, Z_ARG5, true, byte_no);
2940 }
2941 __ z_bru(Done);
2942 BTB_END( is_Float, bsize, "putfield_or_static:is_Float");
2943
2944 // dtos
2945 BTB_BEGIN(is_Double, bsize, "putfield_or_static:is_Double");
2946 __ pop(dtos);
2947 if (!is_static) {
2948 pop_and_check_object(obj);
2949 }
2950 __ freg2mem_opt(Z_ftos, field);
2951 if (do_rewrite) {
2952 patch_bytecode(Bytecodes::_fast_dputfield, bc, Z_ARG5, true, byte_no);
2953 }
2954 __ z_bru(Done);
2955 BTB_END( is_Double, bsize, "putfield_or_static:is_Double");
2956
2957 // atos
2958 BTB_BEGIN(is_Object, bsize, "putfield_or_static:is_Object");
2959 __ z_bru(atosHandler);
2960 BTB_END( is_Object, bsize, "putfield_or_static:is_Object");
2961
2962 // Bad state detection comes at no extra runtime cost.
2963 BTB_BEGIN(is_badState8, bsize, "putfield_or_static:is_badState8");
2964 __ z_illtrap();
2965 __ z_bru(is_badState);
2966 BTB_END( is_badState8, bsize, "putfield_or_static:is_badState8");
2967 BTB_BEGIN(is_badState9, bsize, "putfield_or_static:is_badState9");
2968 __ z_illtrap();
2969 __ z_bru(is_badState);
2970 BTB_END( is_badState9, bsize, "putfield_or_static:is_badState9");
2971 BTB_BEGIN(is_badStateA, bsize, "putfield_or_static:is_badStateA");
2972 __ z_illtrap();
2973 __ z_bru(is_badState);
2974 BTB_END( is_badStateA, bsize, "putfield_or_static:is_badStateA");
2975 BTB_BEGIN(is_badStateB, bsize, "putfield_or_static:is_badStateB");
2976 __ z_illtrap();
2977 __ z_bru(is_badState);
2978 BTB_END( is_badStateB, bsize, "putfield_or_static:is_badStateB");
2979 BTB_BEGIN(is_badStateC, bsize, "putfield_or_static:is_badStateC");
2980 __ z_illtrap();
2981 __ z_bru(is_badState);
2982 BTB_END( is_badStateC, bsize, "putfield_or_static:is_badStateC");
2983 BTB_BEGIN(is_badStateD, bsize, "putfield_or_static:is_badStateD");
2984 __ z_illtrap();
2985 __ z_bru(is_badState);
2986 BTB_END( is_badStateD, bsize, "putfield_or_static:is_badStateD");
2987 BTB_BEGIN(is_badStateE, bsize, "putfield_or_static:is_badStateE");
2988 __ z_illtrap();
2989 __ z_bru(is_badState);
2990 BTB_END( is_badStateE, bsize, "putfield_or_static:is_badStateE");
2991 BTB_BEGIN(is_badStateF, bsize, "putfield_or_static:is_badStateF");
2992 __ z_illtrap();
2993 __ z_bru(is_badState);
2994 BTB_END( is_badStateF, bsize, "putfield_or_static:is_badStateF");
2995
2996 __ align_address(64);
2997 BIND(is_badState); // Do this outside branch table. Needs a lot of space.
2998 {
2999 unsigned int b_off = __ offset();
3000 if (is_static) __ stop_static("Bad state in putstatic");
3001 else __ stop_static("Bad state in putfield");
3002 unsigned int e_off = __ offset();
3003 }
3004
3005 __ align_address(64);
3006 BIND(atosHandler); // Oops are really complicated to handle.
3007 // There is a lot of code generated.
3008 // Therefore: generate the handler outside of branch table.
3009 // There is no performance penalty. The additional branch
3010 // to here is compensated for by the fallthru to "Done".
3011 {
3012 unsigned int b_off = __ offset();
3013 __ pop(atos);
3014 if (!is_static) {
3015 pop_and_check_object(obj);
3016 }
3017 // Store into the field
3018 do_oop_store(_masm, obj, off, Z_tos, false,
3019 oopStore_tmp1, oopStore_tmp2, oopStore_tmp3, _bs->kind(), false);
3020 if (do_rewrite) {
3021 patch_bytecode(Bytecodes::_fast_aputfield, bc, Z_ARG5, true, byte_no);
3022 }
3023 // __ z_bru(Done); // fallthru
3024 unsigned int e_off = __ offset();
3025 }
3026
3027 BIND(Done);
3028
3029 // Check for volatile store.
3030 Label notVolatile;
3031
3032 __ testbit(Z_ARG4, ConstantPoolCacheEntry::is_volatile_shift);
3033 __ z_brz(notVolatile);
3034 __ z_fence();
3035
3036 BIND(notVolatile);
3037 }
3038
3039 void TemplateTable::putfield(int byte_no) {
3040 BLOCK_COMMENT("putfield {");
3041 putfield_or_static(byte_no, false);
3042 BLOCK_COMMENT("} putfield");
3043 }
3044
3045 void TemplateTable::nofast_putfield(int byte_no) {
3046 putfield_or_static(byte_no, false, may_not_rewrite);
3047 }
3048
3049 void TemplateTable::putstatic(int byte_no) {
3050 BLOCK_COMMENT("putstatic {");
3051 putfield_or_static(byte_no, true);
3052 BLOCK_COMMENT("} putstatic");
3053 }
3054
3055 // Push the tos value back to the stack.
3056 // gc will find oops there and update.
3057 void TemplateTable::jvmti_post_fast_field_mod() {
3058
3059 if (!JvmtiExport::can_post_field_modification()) {
3060 return;
3061 }
3062
3063 // Check to see if a field modification watch has been set before
3064 // we take the time to call into the VM.
3065 Label exit;
3066
3067 BLOCK_COMMENT("jvmti_post_fast_field_mod {");
3068
3069 __ load_absolute_address(Z_R1_scratch,
3070 (address) JvmtiExport::get_field_modification_count_addr());
3071 __ load_and_test_int(Z_R0_scratch, Address(Z_R1_scratch));
3072 __ z_brz(exit);
3073
3074 Register obj = Z_tmp_1;
3075
3076 __ pop_ptr(obj); // Copy the object pointer from tos.
3077 __ verify_oop(obj);
3078 __ push_ptr(obj); // Put the object pointer back on tos.
3079
3080 // Save tos values before call_VM() clobbers them. Since we have
3081 // to do it for every data type, we use the saved values as the
3082 // jvalue object.
3083 switch (bytecode()) { // Load values into the jvalue object.
3084 case Bytecodes::_fast_aputfield:
3085 __ push_ptr(Z_tos);
3086 break;
3087 case Bytecodes::_fast_bputfield:
3088 case Bytecodes::_fast_zputfield:
3089 case Bytecodes::_fast_sputfield:
3090 case Bytecodes::_fast_cputfield:
3091 case Bytecodes::_fast_iputfield:
3092 __ push_i(Z_tos);
3093 break;
3094 case Bytecodes::_fast_dputfield:
3095 __ push_d();
3096 break;
3097 case Bytecodes::_fast_fputfield:
3098 __ push_f();
3099 break;
3100 case Bytecodes::_fast_lputfield:
3101 __ push_l(Z_tos);
3102 break;
3103
3104 default:
3105 ShouldNotReachHere();
3106 }
3107
3108 // jvalue on the stack
3109 __ load_address(Z_ARG4, Address(Z_esp, Interpreter::stackElementSize));
3110 // Access constant pool cache entry.
3111 __ get_cache_entry_pointer_at_bcp(Z_ARG3, Z_tos, 1);
3112 __ verify_oop(obj);
3113
3114 // obj : object pointer copied above
3115 // Z_ARG3: cache entry pointer
3116 // Z_ARG4: jvalue object on the stack
3117 __ call_VM(noreg,
3118 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
3119 obj, Z_ARG3, Z_ARG4);
3120
3121 switch (bytecode()) { // Restore tos values.
3122 case Bytecodes::_fast_aputfield:
3123 __ pop_ptr(Z_tos);
3124 break;
3125 case Bytecodes::_fast_bputfield:
3126 case Bytecodes::_fast_zputfield:
3127 case Bytecodes::_fast_sputfield:
3128 case Bytecodes::_fast_cputfield:
3129 case Bytecodes::_fast_iputfield:
3130 __ pop_i(Z_tos);
3131 break;
3132 case Bytecodes::_fast_dputfield:
3133 __ pop_d(Z_ftos);
3134 break;
3135 case Bytecodes::_fast_fputfield:
3136 __ pop_f(Z_ftos);
3137 break;
3138 case Bytecodes::_fast_lputfield:
3139 __ pop_l(Z_tos);
3140 break;
3141 }
3142
3143 __ bind(exit);
3144 BLOCK_COMMENT("} jvmti_post_fast_field_mod");
3145 }
3146
3147 void TemplateTable::fast_storefield(TosState state) {
3148 transition(state, vtos);
3149
3150 ByteSize base = ConstantPoolCache::base_offset();
3151 jvmti_post_fast_field_mod();
3152
3153 // Access constant pool cache.
3154 Register cache = Z_tmp_1;
3155 Register index = Z_tmp_2;
3156 Register flags = Z_ARG5;
3157
3158 // Index comes in bytes, don't shift afterwards!
3159 __ get_cache_and_index_at_bcp(cache, index, 1);
3160
3161 // Test for volatile.
3162 assert(!flags->is_volatile(), "do_oop_store could perform leaf RT call");
3163 __ z_lg(flags, Address(cache, index, base + ConstantPoolCacheEntry::flags_offset()));
3164
3165 // Replace index with field offset from cache entry.
3166 Register field_offset = index;
3167 __ z_lg(field_offset, Address(cache, index, base + ConstantPoolCacheEntry::f2_offset()));
3168
3169 // Get object from stack.
3170 Register obj = cache;
3171
3172 pop_and_check_object(obj);
3173
3174 // field address
3175 const Address field(obj, field_offset);
3176
3177 // access field
3178 switch (bytecode()) {
3179 case Bytecodes::_fast_aputfield:
3180 do_oop_store(_masm, obj, field_offset, Z_tos, false,
3181 Z_ARG2, Z_ARG3, Z_ARG4, _bs->kind(), false);
3182 break;
3183 case Bytecodes::_fast_lputfield:
3184 __ reg2mem_opt(Z_tos, field);
3185 break;
3186 case Bytecodes::_fast_iputfield:
3187 __ reg2mem_opt(Z_tos, field, false);
3188 break;
3189 case Bytecodes::_fast_zputfield:
3190 __ z_nilf(Z_tos, 0x1);
3191 // fall through to bputfield
3192 case Bytecodes::_fast_bputfield:
3193 __ z_stc(Z_tos, field);
3194 break;
3195 case Bytecodes::_fast_sputfield:
3196 // fall through
3197 case Bytecodes::_fast_cputfield:
3198 __ z_sth(Z_tos, field);
3199 break;
3200 case Bytecodes::_fast_fputfield:
3201 __ freg2mem_opt(Z_ftos, field, false);
3202 break;
3203 case Bytecodes::_fast_dputfield:
3204 __ freg2mem_opt(Z_ftos, field);
3205 break;
3206 default:
3207 ShouldNotReachHere();
3208 }
3209
3210 // Check for volatile store.
3211 Label notVolatile;
3212
3213 __ testbit(flags, ConstantPoolCacheEntry::is_volatile_shift);
3214 __ z_brz(notVolatile);
3215 __ z_fence();
3216
3217 __ bind(notVolatile);
3218 }
3219
3220 void TemplateTable::fast_accessfield(TosState state) {
3221 transition(atos, state);
3222
3223 Register obj = Z_tos;
3224
3225 // Do the JVMTI work here to avoid disturbing the register state below
3226 if (JvmtiExport::can_post_field_access()) {
3227 // Check to see if a field access watch has been set before we
3228 // take the time to call into the VM.
3229 Label cont;
3230
3231 __ load_absolute_address(Z_R1_scratch,
3232 (address)JvmtiExport::get_field_access_count_addr());
3233 __ load_and_test_int(Z_R0_scratch, Address(Z_R1_scratch));
3234 __ z_brz(cont);
3235
3236 // Access constant pool cache entry.
3237
3238 __ get_cache_entry_pointer_at_bcp(Z_ARG3, Z_tmp_1, 1);
3239 __ verify_oop(obj);
3240 __ push_ptr(obj); // Save object pointer before call_VM() clobbers it.
3241 __ z_lgr(Z_ARG2, obj);
3242
3243 // Z_ARG2: object pointer copied above
3244 // Z_ARG3: cache entry pointer
3245 __ call_VM(noreg,
3246 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
3247 Z_ARG2, Z_ARG3);
3248 __ pop_ptr(obj); // Restore object pointer.
3249
3250 __ bind(cont);
3251 }
3252
3253 // Access constant pool cache.
3254 Register cache = Z_tmp_1;
3255 Register index = Z_tmp_2;
3256
3257 // Index comes in bytes, don't shift afterwards!
3258 __ get_cache_and_index_at_bcp(cache, index, 1);
3259 // Replace index with field offset from cache entry.
3260 __ mem2reg_opt(index,
3261 Address(cache, index,
3262 ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3263
3264 __ verify_oop(obj);
3265 __ null_check(obj);
3266
3267 Address field(obj, index);
3268
3269 // access field
3270 switch (bytecode()) {
3271 case Bytecodes::_fast_agetfield:
3272 __ load_heap_oop(Z_tos, field);
3273 __ verify_oop(Z_tos);
3274 return;
3275 case Bytecodes::_fast_lgetfield:
3276 __ mem2reg_opt(Z_tos, field);
3277 return;
3278 case Bytecodes::_fast_igetfield:
3279 __ mem2reg_opt(Z_tos, field, false);
3280 return;
3281 case Bytecodes::_fast_bgetfield:
3282 __ z_lb(Z_tos, field);
3283 return;
3284 case Bytecodes::_fast_sgetfield:
3285 __ z_lh(Z_tos, field);
3286 return;
3287 case Bytecodes::_fast_cgetfield:
3288 __ z_llgh(Z_tos, field); // Load into 64 bits, works on all CPUs.
3289 return;
3290 case Bytecodes::_fast_fgetfield:
3291 __ mem2freg_opt(Z_ftos, field, false);
3292 return;
3293 case Bytecodes::_fast_dgetfield:
3294 __ mem2freg_opt(Z_ftos, field);
3295 return;
3296 default:
3297 ShouldNotReachHere();
3298 }
3299 }
3300
3301 void TemplateTable::fast_xaccess(TosState state) {
3302 transition(vtos, state);
3303
3304 Register receiver = Z_tos;
3305 // Get receiver.
3306 __ mem2reg_opt(Z_tos, aaddress(0));
3307
3308 // Access constant pool cache.
3309 Register cache = Z_tmp_1;
3310 Register index = Z_tmp_2;
3311
3312 // Index comes in bytes, don't shift afterwards!
3313 __ get_cache_and_index_at_bcp(cache, index, 2);
3314 // Replace index with field offset from cache entry.
3315 __ mem2reg_opt(index,
3316 Address(cache, index,
3317 ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3318
3319 // Make sure exception is reported in correct bcp range (getfield is
3320 // next instruction).
3321 __ add2reg(Z_bcp, 1);
3322 __ null_check(receiver);
3323 switch (state) {
3324 case itos:
3325 __ mem2reg_opt(Z_tos, Address(receiver, index), false);
3326 break;
3327 case atos:
3328 __ load_heap_oop(Z_tos, Address(receiver, index));
3329 __ verify_oop(Z_tos);
3330 break;
3331 case ftos:
3332 __ mem2freg_opt(Z_ftos, Address(receiver, index));
3333 break;
3334 default:
3335 ShouldNotReachHere();
3336 }
3337
3338 // Reset bcp to original position.
3339 __ add2reg(Z_bcp, -1);
3340 }
3341
3342 //-----------------------------------------------------------------------------
3343 // Calls
3344
3345 void TemplateTable::prepare_invoke(int byte_no,
3346 Register method, // linked method (or i-klass)
3347 Register index, // itable index, MethodType, etc.
3348 Register recv, // If caller wants to see it.
3349 Register flags) { // If caller wants to test it.
3350 // Determine flags.
3351 const Bytecodes::Code code = bytecode();
3352 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
3353 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
3354 const bool is_invokehandle = code == Bytecodes::_invokehandle;
3355 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
3356 const bool is_invokespecial = code == Bytecodes::_invokespecial;
3357 const bool load_receiver = (recv != noreg);
3358 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3359
3360 // Setup registers & access constant pool cache.
3361 if (recv == noreg) { recv = Z_ARG1; }
3362 if (flags == noreg) { flags = Z_ARG2; }
3363 assert_different_registers(method, Z_R14, index, recv, flags);
3364
3365 BLOCK_COMMENT("prepare_invoke {");
3366
3367 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
3368
3369 // Maybe push appendix to arguments.
3370 if (is_invokedynamic || is_invokehandle) {
3371 Label L_no_push;
3372 Register resolved_reference = Z_R1_scratch;
3373 __ testbit(flags, ConstantPoolCacheEntry::has_appendix_shift);
3374 __ z_bfalse(L_no_push);
3375 // Push the appendix as a trailing parameter.
3376 // This must be done before we get the receiver,
3377 // since the parameter_size includes it.
3378 __ load_resolved_reference_at_index(resolved_reference, index);
3379 __ verify_oop(resolved_reference);
3380 __ push_ptr(resolved_reference); // Push appendix (MethodType, CallSite, etc.).
3381 __ bind(L_no_push);
3382 }
3383
3384 // Load receiver if needed (after appendix is pushed so parameter size is correct).
3385 if (load_receiver) {
3386 assert(!is_invokedynamic, "");
3387 // recv := int2long(flags & ConstantPoolCacheEntry::parameter_size_mask) << 3
3388 // Flags is zero-extended int2long when loaded during load_invoke_cp_cache_entry().
3389 // Only the least significant byte (psize) of flags is used.
3390 {
3391 const unsigned int logSES = Interpreter::logStackElementSize;
3392 const int bit_shift = logSES;
3393 const int r_bitpos = 63 - bit_shift;
3394 const int l_bitpos = r_bitpos - ConstantPoolCacheEntry::parameter_size_bits + 1;
3395 const int n_rotate = bit_shift;
3396 assert(ConstantPoolCacheEntry::parameter_size_mask == 255, "adapt bitpositions");
3397 __ rotate_then_insert(recv, flags, l_bitpos, r_bitpos, n_rotate, true);
3398 }
3399 // Recv now contains #arguments * StackElementSize.
3400
3401 Address recv_addr(Z_esp, recv);
3402 __ z_lg(recv, recv_addr);
3403 __ verify_oop(recv);
3404 }
3405
3406 // Compute return type.
3407 // ret_type is used by callers (invokespecial, invokestatic) at least.
3408 Register ret_type = Z_R1_scratch;
3409 assert_different_registers(ret_type, method);
3410
3411 const address table_addr = (address)Interpreter::invoke_return_entry_table_for(code);
3412 __ load_absolute_address(Z_R14, table_addr);
3413
3414 {
3415 const int bit_shift = LogBytesPerWord; // Size of each table entry.
3416 const int r_bitpos = 63 - bit_shift;
3417 const int l_bitpos = r_bitpos - ConstantPoolCacheEntry::tos_state_bits + 1;
3418 const int n_rotate = bit_shift-ConstantPoolCacheEntry::tos_state_shift;
3419 __ rotate_then_insert(ret_type, flags, l_bitpos, r_bitpos, n_rotate, true);
3420 // Make sure we don't need to mask flags for tos_state after the above shift.
3421 ConstantPoolCacheEntry::verify_tos_state_shift();
3422 }
3423
3424 __ z_lg(Z_R14, Address(Z_R14, ret_type)); // Load return address.
3425 BLOCK_COMMENT("} prepare_invoke");
3426 }
3427
3428
3429 void TemplateTable::invokevirtual_helper(Register index,
3430 Register recv,
3431 Register flags) {
3432 // Uses temporary registers Z_tmp_2, Z_ARG4.
3433 assert_different_registers(index, recv, Z_tmp_2, Z_ARG4);
3434
3435 // Test for an invoke of a final method.
3436 Label notFinal;
3437
3438 BLOCK_COMMENT("invokevirtual_helper {");
3439
3440 __ testbit(flags, ConstantPoolCacheEntry::is_vfinal_shift);
3441 __ z_brz(notFinal);
3442
3443 const Register method = index; // Method must be Z_ARG3.
3444 assert(method == Z_ARG3, "method must be second argument for interpreter calling convention");
3445
3446 // Do the call - the index is actually the method to call.
3447 // That is, f2 is a vtable index if !is_vfinal, else f2 is a method.
3448
3449 // It's final, need a null check here!
3450 __ null_check(recv);
3451
3452 // Profile this call.
3453 __ profile_final_call(Z_tmp_2);
3454 __ profile_arguments_type(Z_tmp_2, method, Z_ARG5, true); // Argument type profiling.
3455 __ jump_from_interpreted(method, Z_tmp_2);
3456
3457 __ bind(notFinal);
3458
3459 // Get receiver klass.
3460 __ null_check(recv, Z_R0_scratch, oopDesc::klass_offset_in_bytes());
3461 __ load_klass(Z_tmp_2, recv);
3462
3463 // Profile this call.
3464 __ profile_virtual_call(Z_tmp_2, Z_ARG4, Z_ARG5);
3465
3466 // Get target method & entry point.
3467 __ z_sllg(index, index, exact_log2(vtableEntry::size_in_bytes()));
3468 __ mem2reg_opt(method,
3469 Address(Z_tmp_2, index,
3470 Klass::vtable_start_offset() + in_ByteSize(vtableEntry::method_offset_in_bytes())));
3471 __ profile_arguments_type(Z_ARG4, method, Z_ARG5, true);
3472 __ jump_from_interpreted(method, Z_ARG4);
3473 BLOCK_COMMENT("} invokevirtual_helper");
3474 }
3475
3476 void TemplateTable::invokevirtual(int byte_no) {
3477 transition(vtos, vtos);
3478
3479 assert(byte_no == f2_byte, "use this argument");
3480 prepare_invoke(byte_no,
3481 Z_ARG3, // method or vtable index
3482 noreg, // unused itable index
3483 Z_ARG1, // recv
3484 Z_ARG2); // flags
3485
3486 // Z_ARG3 : index
3487 // Z_ARG1 : receiver
3488 // Z_ARG2 : flags
3489 invokevirtual_helper(Z_ARG3, Z_ARG1, Z_ARG2);
3490 }
3491
3492 void TemplateTable::invokespecial(int byte_no) {
3493 transition(vtos, vtos);
3494
3495 assert(byte_no == f1_byte, "use this argument");
3496 Register Rmethod = Z_tmp_2;
3497 prepare_invoke(byte_no, Rmethod, noreg, // Get f1 method.
3498 Z_ARG3); // Get receiver also for null check.
3499 __ verify_oop(Z_ARG3);
3500 __ null_check(Z_ARG3);
3501 // Do the call.
3502 __ profile_call(Z_ARG2);
3503 __ profile_arguments_type(Z_ARG2, Rmethod, Z_ARG5, false);
3504 __ jump_from_interpreted(Rmethod, Z_R1_scratch);
3505 }
3506
3507 void TemplateTable::invokestatic(int byte_no) {
3508 transition(vtos, vtos);
3509
3510 assert(byte_no == f1_byte, "use this argument");
3511 Register Rmethod = Z_tmp_2;
3512 prepare_invoke(byte_no, Rmethod); // Get f1 method.
3513 // Do the call.
3514 __ profile_call(Z_ARG2);
3515 __ profile_arguments_type(Z_ARG2, Rmethod, Z_ARG5, false);
3516 __ jump_from_interpreted(Rmethod, Z_R1_scratch);
3517 }
3518
3519 // Outdated feature, and we don't support it.
3520 void TemplateTable::fast_invokevfinal(int byte_no) {
3521 transition(vtos, vtos);
3522 assert(byte_no == f2_byte, "use this argument");
3523 __ stop("fast_invokevfinal not used on linuxs390x");
3524 }
3525
3526 void TemplateTable::invokeinterface(int byte_no) {
3527 transition(vtos, vtos);
3528
3529 assert(byte_no == f1_byte, "use this argument");
3530 Register interface = Z_tos;
3531 Register index = Z_ARG3;
3532 Register receiver = Z_tmp_1;
3533 Register flags = Z_ARG5;
3534
3535 BLOCK_COMMENT("invokeinterface {");
3536
3537 // Destroys Z_ARG1 and Z_ARG2, thus use Z_ARG4 and copy afterwards.
3538 prepare_invoke(byte_no, Z_ARG4, index, // Get f1 klassOop, f2 itable index.
3539 receiver, flags);
3540
3541 // Z_R14 (== Z_bytecode) : return entry
3542
3543 __ z_lgr(interface, Z_ARG4);
3544
3545 // Special case of invokeinterface called for virtual method of
3546 // java.lang.Object. See cpCacheOop.cpp for details.
3547 // This code isn't produced by javac, but could be produced by
3548 // another compliant java compiler.
3549 Label notMethod;
3550 __ testbit(flags, ConstantPoolCacheEntry::is_forced_virtual_shift);
3551 __ z_brz(notMethod);
3552 invokevirtual_helper(index, receiver, flags);
3553 __ bind(notMethod);
3554
3555 // Get receiver klass into klass - also a null check.
3556 Register klass = flags;
3557
3558 __ restore_locals();
3559 __ load_klass(klass, receiver);
3560
3561 // Profile this call.
3562 __ profile_virtual_call(klass, Z_ARG2/*mdp*/, Z_ARG4/*scratch*/);
3563
3564 NearLabel no_such_interface, no_such_method;
3565 Register method = Z_tmp_2;
3566
3567 // TK 2010-08-24: save the index to Z_ARG4. needed in case of an error
3568 // in throw_AbstractMethodErrorByTemplateTable
3569 __ z_lgr(Z_ARG4, index);
3570 // TK 2011-03-24: copy also klass because it could be changed in
3571 // lookup_interface_method
3572 __ z_lgr(Z_ARG2, klass);
3573 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3574 klass, interface, index,
3575 // outputs: method, scan temp. reg
3576 method, Z_tmp_2, Z_R1_scratch,
3577 no_such_interface);
3578
3579 // Check for abstract method error.
3580 // Note: This should be done more efficiently via a throw_abstract_method_error
3581 // interpreter entry point and a conditional jump to it in case of a null
3582 // method.
3583 __ compareU64_and_branch(method, (intptr_t) 0,
3584 Assembler::bcondZero, no_such_method);
3585
3586 __ profile_arguments_type(Z_ARG3, method, Z_ARG5, true);
3587
3588 // Do the call.
3589 __ jump_from_interpreted(method, Z_ARG5);
3590 __ should_not_reach_here();
3591
3592 // exception handling code follows...
3593 // Note: Must restore interpreter registers to canonical
3594 // state for exception handling to work correctly!
3595
3596 __ bind(no_such_method);
3597
3598 // Throw exception.
3599 __ restore_bcp(); // Bcp must be correct for exception handler (was destroyed).
3600 __ restore_locals(); // Make sure locals pointer is correct as well (was destroyed).
3601 // TK 2010-08-24: Call throw_AbstractMethodErrorByTemplateTable now with the
3602 // relevant information for generating a better error message
3603 __ call_VM(noreg,
3604 CAST_FROM_FN_PTR(address,
3605 InterpreterRuntime::throw_AbstractMethodError),
3606 Z_ARG2, interface, Z_ARG4);
3607 // The call_VM checks for exception, so we should never return here.
3608 __ should_not_reach_here();
3609
3610 __ bind(no_such_interface);
3611
3612 // Throw exception.
3613 __ restore_bcp(); // Bcp must be correct for exception handler (was destroyed).
3614 __ restore_locals(); // Make sure locals pointer is correct as well (was destroyed).
3615 // TK 2010-08-24: Call throw_IncompatibleClassChangeErrorByTemplateTable now with the
3616 // relevant information for generating a better error message
3617 __ call_VM(noreg,
3618 CAST_FROM_FN_PTR(address,
3619 InterpreterRuntime::throw_IncompatibleClassChangeError),
3620 Z_ARG2, interface);
3621 // The call_VM checks for exception, so we should never return here.
3622 __ should_not_reach_here();
3623
3624 BLOCK_COMMENT("} invokeinterface");
3625 return;
3626 }
3627
3628 void TemplateTable::invokehandle(int byte_no) {
3629 transition(vtos, vtos);
3630
3631 const Register method = Z_tmp_2;
3632 const Register recv = Z_ARG5;
3633 const Register mtype = Z_tmp_1;
3634 prepare_invoke(byte_no,
3635 method, mtype, // Get f2 method, f1 MethodType.
3636 recv);
3637 __ verify_method_ptr(method);
3638 __ verify_oop(recv);
3639 __ null_check(recv);
3640
3641 // Note: Mtype is already pushed (if necessary) by prepare_invoke.
3642
3643 // FIXME: profile the LambdaForm also.
3644 __ profile_final_call(Z_ARG2);
3645 __ profile_arguments_type(Z_ARG3, method, Z_ARG5, true);
3646
3647 __ jump_from_interpreted(method, Z_ARG3);
3648 }
3649
3650 void TemplateTable::invokedynamic(int byte_no) {
3651 transition(vtos, vtos);
3652
3653 const Register Rmethod = Z_tmp_2;
3654 const Register Rcallsite = Z_tmp_1;
3655
3656 prepare_invoke(byte_no, Rmethod, Rcallsite);
3657
3658 // Rmethod: CallSite object (from f1)
3659 // Rcallsite: MH.linkToCallSite method (from f2)
3660
3661 // Note: Callsite is already pushed by prepare_invoke.
3662
3663 // TODO: should make a type profile for any invokedynamic that takes a ref argument.
3664 // Profile this call.
3665 __ profile_call(Z_ARG2);
3666 __ profile_arguments_type(Z_ARG2, Rmethod, Z_ARG5, false);
3667 __ jump_from_interpreted(Rmethod, Z_ARG2);
3668 }
3669
3670 //-----------------------------------------------------------------------------
3671 // Allocation
3672
3673 // Original comment on "allow_shared_alloc":
3674 // Always go the slow path.
3675 // + Eliminated optimization within the template-based interpreter:
3676 // If an allocation is done within the interpreter without using
3677 // tlabs, the interpreter tries to do the allocation directly
3678 // on the heap.
3679 // + That means the profiling hooks are not considered and allocations
3680 // get lost for the profiling framework.
3681 // + However, we do not think that this optimization is really needed,
3682 // so we always go now the slow path through the VM in this case --
3683 // spec jbb2005 shows no measurable performance degradation.
3684 void TemplateTable::_new() {
3685 transition(vtos, atos);
3686 address prev_instr_address = NULL;
3687 Register tags = Z_tmp_1;
3688 Register RallocatedObject = Z_tos;
3689 Register cpool = Z_ARG2;
3690 Register tmp = Z_ARG3; // RobjectFields==tmp and Rsize==offset must be a register pair.
3691 Register offset = Z_ARG4;
3692 Label slow_case;
3693 Label done;
3694 Label initialize_header;
3695 Label initialize_object; // Including clearing the fields.
3696 Label allocate_shared;
3697
3698 BLOCK_COMMENT("TemplateTable::_new {");
3699 __ get_2_byte_integer_at_bcp(offset/*dest*/, 1, InterpreterMacroAssembler::Unsigned);
3700 __ get_cpool_and_tags(cpool, tags);
3701 // Make sure the class we're about to instantiate has been resolved.
3702 // This is done before loading InstanceKlass to be consistent with the order
3703 // how Constant Pool is updated (see ConstantPool::klass_at_put).
3704 const int tags_offset = Array<u1>::base_offset_in_bytes();
3705 __ load_address(tmp, Address(tags, offset, tags_offset));
3706 __ z_cli(0, tmp, JVM_CONSTANT_Class);
3707 __ z_brne(slow_case);
3708
3709 __ z_sllg(offset, offset, LogBytesPerWord); // Convert to to offset.
3710 // Get InstanceKlass.
3711 Register iklass = cpool;
3712 __ load_resolved_klass_at_offset(cpool, offset, iklass);
3713
3714 // Make sure klass is initialized & doesn't have finalizer.
3715 // Make sure klass is fully initialized.
3716 const int state_offset = in_bytes(InstanceKlass::init_state_offset());
3717 if (Immediate::is_uimm12(state_offset)) {
3718 __ z_cli(state_offset, iklass, InstanceKlass::fully_initialized);
3719 } else {
3720 __ z_cliy(state_offset, iklass, InstanceKlass::fully_initialized);
3721 }
3722 __ z_brne(slow_case);
3723
3724 // Get instance_size in InstanceKlass (scaled to a count of bytes).
3725 Register Rsize = offset;
3726 const int mask = 1 << Klass::_lh_instance_slow_path_bit;
3727 __ z_llgf(Rsize, Address(iklass, Klass::layout_helper_offset()));
3728 __ z_tmll(Rsize, mask);
3729 __ z_btrue(slow_case);
3730
3731 // Allocate the instance
3732 // 1) Try to allocate in the TLAB.
3733 // 2) If fail and the object is large allocate in the shared Eden.
3734 // 3) If the above fails (or is not applicable), go to a slow case
3735 // (creates a new TLAB, etc.).
3736
3737 // Always go the slow path. See comment above this template.
3738 const bool allow_shared_alloc = false;
3739
3740 if (UseTLAB) {
3741 Register RoldTopValue = RallocatedObject;
3742 Register RnewTopValue = tmp;
3743 __ z_lg(RoldTopValue, Address(Z_thread, JavaThread::tlab_top_offset()));
3744 __ load_address(RnewTopValue, Address(RoldTopValue, Rsize));
3745 __ z_cg(RnewTopValue, Address(Z_thread, JavaThread::tlab_end_offset()));
3746 __ z_brh(allow_shared_alloc ? allocate_shared : slow_case);
3747 __ z_stg(RnewTopValue, Address(Z_thread, JavaThread::tlab_top_offset()));
3748 if (ZeroTLAB) {
3749 // The fields have been already cleared.
3750 __ z_bru(initialize_header);
3751 } else {
3752 // Initialize both the header and fields.
3753 if (allow_shared_alloc) {
3754 __ z_bru(initialize_object);
3755 } else {
3756 // Fallthrough to initialize_object, but assert that it is on fall through path.
3757 prev_instr_address = __ pc();
3758 }
3759 }
3760 }
3761
3762 if (allow_shared_alloc) {
3763 // Allocation in shared Eden not implemented, because sapjvm allocation trace does not allow it.
3764 Unimplemented();
3765 }
3766
3767 if (UseTLAB) {
3768 Register RobjectFields = tmp;
3769 Register Rzero = Z_R1_scratch;
3770
3771 assert(ZeroTLAB || prev_instr_address == __ pc(),
3772 "must not omit jump to initialize_object above, as it is not on the fall through path");
3773 __ clear_reg(Rzero, true /*whole reg*/, false); // Load 0L into Rzero. Don't set CC.
3774
3775 // The object is initialized before the header. If the object size is
3776 // zero, go directly to the header initialization.
3777 __ bind(initialize_object);
3778 __ z_aghi(Rsize, (int)-sizeof(oopDesc)); // Subtract header size, set CC.
3779 __ z_bre(initialize_header); // Jump if size of fields is zero.
3780
3781 // Initialize object fields.
3782 // See documentation for MVCLE instruction!!!
3783 assert(RobjectFields->encoding() % 2 == 0, "RobjectFields must be an even register");
3784 assert(Rsize->encoding() == (RobjectFields->encoding()+1),
3785 "RobjectFields and Rsize must be a register pair");
3786 assert(Rzero->encoding() % 2 == 1, "Rzero must be an odd register");
3787
3788 // Set Rzero to 0 and use it as src length, then mvcle will copy nothing
3789 // and fill the object with the padding value 0.
3790 __ add2reg(RobjectFields, sizeof(oopDesc), RallocatedObject);
3791 __ move_long_ext(RobjectFields, as_Register(Rzero->encoding() - 1), 0);
3792
3793 // Initialize object header only.
3794 __ bind(initialize_header);
3795 if (UseBiasedLocking) {
3796 Register prototype = RobjectFields;
3797 __ z_lg(prototype, Address(iklass, Klass::prototype_header_offset()));
3798 __ z_stg(prototype, Address(RallocatedObject, oopDesc::mark_offset_in_bytes()));
3799 } else {
3800 __ store_const(Address(RallocatedObject, oopDesc::mark_offset_in_bytes()),
3801 (long)markOopDesc::prototype());
3802 }
3803
3804 __ store_klass_gap(Rzero, RallocatedObject); // Zero klass gap for compressed oops.
3805 __ store_klass(iklass, RallocatedObject); // Store klass last.
3806
3807 {
3808 SkipIfEqual skip(_masm, &DTraceAllocProbes, false, Z_ARG5 /*scratch*/);
3809 // Trigger dtrace event for fastpath.
3810 __ push(atos); // Save the return value.
3811 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), RallocatedObject);
3812 __ pop(atos); // Restore the return value.
3813 }
3814 __ z_bru(done);
3815 }
3816
3817 // slow case
3818 __ bind(slow_case);
3819 __ get_constant_pool(Z_ARG2);
3820 __ get_2_byte_integer_at_bcp(Z_ARG3/*dest*/, 1, InterpreterMacroAssembler::Unsigned);
3821 call_VM(Z_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Z_ARG2, Z_ARG3);
3822 __ verify_oop(Z_tos);
3823
3824 // continue
3825 __ bind(done);
3826
3827 BLOCK_COMMENT("} TemplateTable::_new");
3828 }
3829
3830 void TemplateTable::newarray() {
3831 transition(itos, atos);
3832
3833 // Call runtime.
3834 __ z_llgc(Z_ARG2, at_bcp(1)); // type
3835 __ z_lgfr(Z_ARG3, Z_tos); // size
3836 call_VM(Z_RET,
3837 CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
3838 Z_ARG2, Z_ARG3);
3839 }
3840
3841 void TemplateTable::anewarray() {
3842 transition(itos, atos);
3843 __ get_2_byte_integer_at_bcp(Z_ARG3, 1, InterpreterMacroAssembler::Unsigned);
3844 __ get_constant_pool(Z_ARG2);
3845 __ z_lgfr(Z_ARG4, Z_tos);
3846 call_VM(Z_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
3847 Z_ARG2, Z_ARG3, Z_ARG4);
3848 }
3849
3850 void TemplateTable::arraylength() {
3851 transition(atos, itos);
3852
3853 int offset = arrayOopDesc::length_offset_in_bytes();
3854
3855 __ null_check(Z_tos, Z_R0_scratch, offset);
3856 __ mem2reg_opt(Z_tos, Address(Z_tos, offset), false);
3857 }
3858
3859 void TemplateTable::checkcast() {
3860 transition(atos, atos);
3861
3862 NearLabel done, is_null, ok_is_subtype, quicked, resolved;
3863
3864 BLOCK_COMMENT("checkcast {");
3865 // If object is NULL, we are almost done.
3866 __ compareU64_and_branch(Z_tos, (intptr_t) 0, Assembler::bcondZero, is_null);
3867
3868 // Get cpool & tags index.
3869 Register cpool = Z_tmp_1;
3870 Register tags = Z_tmp_2;
3871 Register index = Z_ARG5;
3872
3873 __ get_cpool_and_tags(cpool, tags);
3874 __ get_2_byte_integer_at_bcp(index, 1, InterpreterMacroAssembler::Unsigned);
3875 // See if bytecode has already been quicked.
3876 // Note: For CLI, we would have to add the index to the tags pointer first,
3877 // thus load and compare in a "classic" manner.
3878 __ z_llgc(Z_R0_scratch,
3879 Address(tags, index, Array<u1>::base_offset_in_bytes()));
3880 __ compareU64_and_branch(Z_R0_scratch, JVM_CONSTANT_Class,
3881 Assembler::bcondEqual, quicked);
3882
3883 __ push(atos); // Save receiver for result, and for GC.
3884 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3885 __ get_vm_result_2(Z_tos);
3886
3887 Register receiver = Z_ARG4;
3888 Register klass = Z_tos;
3889 Register subklass = Z_ARG5;
3890
3891 __ pop_ptr(receiver); // restore receiver
3892 __ z_bru(resolved);
3893
3894 // Get superklass in klass and subklass in subklass.
3895 __ bind(quicked);
3896
3897 __ z_lgr(Z_ARG4, Z_tos); // Save receiver.
3898 __ z_sllg(index, index, LogBytesPerWord); // index2bytes for addressing
3899 __ load_resolved_klass_at_offset(cpool, index, klass);
3900
3901 __ bind(resolved);
3902
3903 __ load_klass(subklass, receiver);
3904
3905 // Generate subtype check. Object in receiver.
3906 // Superklass in klass. Subklass in subklass.
3907 __ gen_subtype_check(subklass, klass, Z_ARG3, Z_tmp_1, ok_is_subtype);
3908
3909 // Come here on failure.
3910 __ push_ptr(receiver);
3911 // Object is at TOS, target klass oop expected in rax by convention.
3912 __ z_brul((address) Interpreter::_throw_ClassCastException_entry);
3913
3914 // Come here on success.
3915 __ bind(ok_is_subtype);
3916
3917 __ z_lgr(Z_tos, receiver); // Restore object.
3918
3919 // Collect counts on whether this test sees NULLs a lot or not.
3920 if (ProfileInterpreter) {
3921 __ z_bru(done);
3922 __ bind(is_null);
3923 __ profile_null_seen(Z_tmp_1);
3924 } else {
3925 __ bind(is_null); // Same as 'done'.
3926 }
3927
3928 __ bind(done);
3929 BLOCK_COMMENT("} checkcast");
3930 }
3931
3932 void TemplateTable::instanceof() {
3933 transition(atos, itos);
3934
3935 NearLabel done, is_null, ok_is_subtype, quicked, resolved;
3936
3937 BLOCK_COMMENT("instanceof {");
3938 // If object is NULL, we are almost done.
3939 __ compareU64_and_branch(Z_tos, (intptr_t) 0, Assembler::bcondZero, is_null);
3940
3941 // Get cpool & tags index.
3942 Register cpool = Z_tmp_1;
3943 Register tags = Z_tmp_2;
3944 Register index = Z_ARG5;
3945
3946 __ get_cpool_and_tags(cpool, tags);
3947 __ get_2_byte_integer_at_bcp(index, 1, InterpreterMacroAssembler::Unsigned);
3948 // See if bytecode has already been quicked.
3949 // Note: For CLI, we would have to add the index to the tags pointer first,
3950 // thus load and compare in a "classic" manner.
3951 __ z_llgc(Z_R0_scratch,
3952 Address(tags, index, Array<u1>::base_offset_in_bytes()));
3953 __ compareU64_and_branch(Z_R0_scratch, JVM_CONSTANT_Class, Assembler::bcondEqual, quicked);
3954
3955 __ push(atos); // Save receiver for result, and for GC.
3956 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3957 __ get_vm_result_2(Z_tos);
3958
3959 Register receiver = Z_tmp_2;
3960 Register klass = Z_tos;
3961 Register subklass = Z_tmp_2;
3962
3963 __ pop_ptr(receiver); // Restore receiver.
3964 __ verify_oop(receiver);
3965 __ load_klass(subklass, subklass);
3966 __ z_bru(resolved);
3967
3968 // Get superklass in klass and subklass in subklass.
3969 __ bind(quicked);
3970
3971 __ load_klass(subklass, Z_tos);
3972 __ z_sllg(index, index, LogBytesPerWord); // index2bytes for addressing
3973 __ load_resolved_klass_at_offset(cpool, index, klass);
3974
3975 __ bind(resolved);
3976
3977 // Generate subtype check.
3978 // Superklass in klass. Subklass in subklass.
3979 __ gen_subtype_check(subklass, klass, Z_ARG4, Z_ARG5, ok_is_subtype);
3980
3981 // Come here on failure.
3982 __ clear_reg(Z_tos, true, false);
3983 __ z_bru(done);
3984
3985 // Come here on success.
3986 __ bind(ok_is_subtype);
3987 __ load_const_optimized(Z_tos, 1);
3988
3989 // Collect counts on whether this test sees NULLs a lot or not.
3990 if (ProfileInterpreter) {
3991 __ z_bru(done);
3992 __ bind(is_null);
3993 __ profile_null_seen(Z_tmp_1);
3994 } else {
3995 __ bind(is_null); // same as 'done'
3996 }
3997
3998 __ bind(done);
3999 // tos = 0: obj == NULL or obj is not an instanceof the specified klass
4000 // tos = 1: obj != NULL and obj is an instanceof the specified klass
4001 BLOCK_COMMENT("} instanceof");
4002 }
4003
4004 //-----------------------------------------------------------------------------
4005 // Breakpoints
4006 void TemplateTable::_breakpoint() {
4007
4008 // Note: We get here even if we are single stepping.
4009 // Jbug insists on setting breakpoints at every bytecode
4010 // even if we are in single step mode.
4011
4012 transition(vtos, vtos);
4013
4014 // Get the unpatched byte code.
4015 __ get_method(Z_ARG2);
4016 __ call_VM(noreg,
4017 CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at),
4018 Z_ARG2, Z_bcp);
4019 // Save the result to a register that is preserved over C-function calls.
4020 __ z_lgr(Z_tmp_1, Z_RET);
4021
4022 // Post the breakpoint event.
4023 __ get_method(Z_ARG2);
4024 __ call_VM(noreg,
4025 CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
4026 Z_ARG2, Z_bcp);
4027
4028 // Must restore the bytecode, because call_VM destroys Z_bytecode.
4029 __ z_lgr(Z_bytecode, Z_tmp_1);
4030
4031 // Complete the execution of original bytecode.
4032 __ dispatch_only_normal(vtos);
4033 }
4034
4035
4036 // Exceptions
4037
4038 void TemplateTable::athrow() {
4039 transition(atos, vtos);
4040 __ null_check(Z_tos);
4041 __ load_absolute_address(Z_ARG2, Interpreter::throw_exception_entry());
4042 __ z_br(Z_ARG2);
4043 }
4044
4045 // Synchronization
4046 //
4047 // Note: monitorenter & exit are symmetric routines; which is reflected
4048 // in the assembly code structure as well
4049 //
4050 // Stack layout:
4051 //
4052 // callers_sp <- Z_SP (callers_sp == Z_fp (own fp))
4053 // return_pc
4054 // [rest of ABI_160]
4055 // /slot o: free
4056 // / ... free
4057 // oper. | slot n+1: free <- Z_esp points to first free slot
4058 // stack | slot n: val caches IJAVA_STATE.esp
4059 // | ...
4060 // \slot 0: val
4061 // /slot m <- IJAVA_STATE.monitors = monitor block top
4062 // | ...
4063 // monitors| slot 2
4064 // | slot 1
4065 // \slot 0
4066 // /slot l <- monitor block bot
4067 // ijava_state | ...
4068 // | slot 2
4069 // \slot 0
4070 // <- Z_fp
4071 void TemplateTable::monitorenter() {
4072 transition(atos, vtos);
4073
4074 BLOCK_COMMENT("monitorenter {");
4075
4076 // Check for NULL object.
4077 __ null_check(Z_tos);
4078 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
4079 NearLabel allocated;
4080 // Initialize entry pointer.
4081 const Register Rfree_slot = Z_tmp_1;
4082 __ clear_reg(Rfree_slot, true, false); // Points to free slot or NULL. Don't set CC.
4083
4084 // Find a free slot in the monitor block from top to bot (result in Rfree_slot).
4085 {
4086 const Register Rcurr_monitor = Z_ARG2;
4087 const Register Rbot = Z_ARG3; // Points to word under bottom of monitor block.
4088 const Register Rlocked_obj = Z_ARG4;
4089 NearLabel loop, exit, not_free;
4090 // Starting with top-most entry.
4091 __ get_monitors(Rcurr_monitor); // Rcur_monitor = IJAVA_STATE.monitors
4092 __ add2reg(Rbot, -frame::z_ijava_state_size, Z_fp);
4093
4094 #ifdef ASSERT
4095 address reentry = NULL;
4096 { NearLabel ok;
4097 __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondNotHigh, ok);
4098 reentry = __ stop_chain_static(reentry, "IJAVA_STATE.monitors points below monitor block bottom");
4099 __ bind(ok);
4100 }
4101 { NearLabel ok;
4102 __ compareU64_and_branch(Rcurr_monitor, Z_esp, Assembler::bcondHigh, ok);
4103 reentry = __ stop_chain_static(reentry, "IJAVA_STATE.monitors above Z_esp");
4104 __ bind(ok);
4105 }
4106 #endif
4107
4108 // Check if bottom reached, i.e. if there is at least one monitor.
4109 __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondEqual, exit);
4110
4111 __ bind(loop);
4112 // Check if current entry is used.
4113 __ load_and_test_long(Rlocked_obj, Address(Rcurr_monitor, BasicObjectLock::obj_offset_in_bytes()));
4114 __ z_brne(not_free);
4115 // If not used then remember entry in Rfree_slot.
4116 __ z_lgr(Rfree_slot, Rcurr_monitor);
4117 __ bind(not_free);
4118 // Exit if current entry is for same object; this guarantees, that new monitor
4119 // used for recursive lock is above the older one.
4120 __ compareU64_and_branch(Rlocked_obj, Z_tos, Assembler::bcondEqual, exit);
4121 // otherwise advance to next entry
4122 __ add2reg(Rcurr_monitor, entry_size);
4123 // Check if bottom reached, if not at bottom then check this entry.
4124 __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondNotEqual, loop);
4125 __ bind(exit);
4126 }
4127
4128 // Rfree_slot != NULL -> found one
4129 __ compareU64_and_branch(Rfree_slot, (intptr_t)0L, Assembler::bcondNotEqual, allocated);
4130
4131 // Allocate one if there's no free slot.
4132 __ add_monitor_to_stack(false, Z_ARG3, Z_ARG4, Z_ARG5);
4133 __ get_monitors(Rfree_slot);
4134
4135 // Rfree_slot: points to monitor entry.
4136 __ bind(allocated);
4137
4138 // Increment bcp to point to the next bytecode, so exception
4139 // handling for async. exceptions work correctly.
4140 // The object has already been poped from the stack, so the
4141 // expression stack looks correct.
4142 __ add2reg(Z_bcp, 1, Z_bcp);
4143
4144 // Store object.
4145 __ z_stg(Z_tos, BasicObjectLock::obj_offset_in_bytes(), Rfree_slot);
4146 __ lock_object(Rfree_slot, Z_tos);
4147
4148 // Check to make sure this monitor doesn't cause stack overflow after locking.
4149 __ save_bcp(); // in case of exception
4150 __ generate_stack_overflow_check(0);
4151
4152 // The bcp has already been incremented. Just need to dispatch to
4153 // next instruction.
4154 __ dispatch_next(vtos);
4155
4156 BLOCK_COMMENT("} monitorenter");
4157 }
4158
4159
4160 void TemplateTable::monitorexit() {
4161 transition(atos, vtos);
4162
4163 BLOCK_COMMENT("monitorexit {");
4164
4165 // Check for NULL object.
4166 __ null_check(Z_tos);
4167
4168 NearLabel found, not_found;
4169 const Register Rcurr_monitor = Z_ARG2;
4170
4171 // Find matching slot.
4172 {
4173 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
4174 NearLabel entry, loop;
4175
4176 const Register Rbot = Z_ARG3; // Points to word under bottom of monitor block.
4177 const Register Rlocked_obj = Z_ARG4;
4178 // Starting with top-most entry.
4179 __ get_monitors(Rcurr_monitor); // Rcur_monitor = IJAVA_STATE.monitors
4180 __ add2reg(Rbot, -frame::z_ijava_state_size, Z_fp);
4181
4182 #ifdef ASSERT
4183 address reentry = NULL;
4184 { NearLabel ok;
4185 __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondNotHigh, ok);
4186 reentry = __ stop_chain_static(reentry, "IJAVA_STATE.monitors points below monitor block bottom");
4187 __ bind(ok);
4188 }
4189 { NearLabel ok;
4190 __ compareU64_and_branch(Rcurr_monitor, Z_esp, Assembler::bcondHigh, ok);
4191 reentry = __ stop_chain_static(reentry, "IJAVA_STATE.monitors above Z_esp");
4192 __ bind(ok);
4193 }
4194 #endif
4195
4196 // Check if bottom reached, i.e. if there is at least one monitor.
4197 __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondEqual, not_found);
4198
4199 __ bind(loop);
4200 // Check if current entry is for same object.
4201 __ z_lg(Rlocked_obj, Address(Rcurr_monitor, BasicObjectLock::obj_offset_in_bytes()));
4202 // If same object then stop searching.
4203 __ compareU64_and_branch(Rlocked_obj, Z_tos, Assembler::bcondEqual, found);
4204 // Otherwise advance to next entry.
4205 __ add2reg(Rcurr_monitor, entry_size);
4206 // Check if bottom reached, if not at bottom then check this entry.
4207 __ compareU64_and_branch(Rcurr_monitor, Rbot, Assembler::bcondNotEqual, loop);
4208 }
4209
4210 __ bind(not_found);
4211 // Error handling. Unlocking was not block-structured.
4212 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
4213 InterpreterRuntime::throw_illegal_monitor_state_exception));
4214 __ should_not_reach_here();
4215
4216 __ bind(found);
4217 __ push_ptr(Z_tos); // Make sure object is on stack (contract with oopMaps).
4218 __ unlock_object(Rcurr_monitor, Z_tos);
4219 __ pop_ptr(Z_tos); // Discard object.
4220 BLOCK_COMMENT("} monitorexit");
4221 }
4222
4223 // Wide instructions
4224 void TemplateTable::wide() {
4225 transition(vtos, vtos);
4226
4227 __ z_llgc(Z_R1_scratch, at_bcp(1));
4228 __ z_sllg(Z_R1_scratch, Z_R1_scratch, LogBytesPerWord);
4229 __ load_absolute_address(Z_tmp_1, (address) Interpreter::_wentry_point);
4230 __ mem2reg_opt(Z_tmp_1, Address(Z_tmp_1, Z_R1_scratch));
4231 __ z_br(Z_tmp_1);
4232 // Note: the bcp increment step is part of the individual wide
4233 // bytecode implementations.
4234 }
4235
4236 // Multi arrays
4237 void TemplateTable::multianewarray() {
4238 transition(vtos, atos);
4239
4240 __ z_llgc(Z_tmp_1, at_bcp(3)); // Get number of dimensions.
4241 // Slot count to byte offset.
4242 __ z_sllg(Z_tmp_1, Z_tmp_1, Interpreter::logStackElementSize);
4243 // Z_esp points past last_dim, so set to Z_ARG2 to first_dim address.
4244 __ load_address(Z_ARG2, Address(Z_esp, Z_tmp_1));
4245 call_VM(Z_RET,
4246 CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
4247 Z_ARG2);
4248 // Pop dimensions from expression stack.
4249 __ z_agr(Z_esp, Z_tmp_1);
4250 }
--- EOF ---