void LIR_Assembler::ffree(int i) {
  Unimplemented();
}

void LIR_Assembler::breakpoint() {
  __ breakpoint();
}

void LIR_Assembler::push(LIR_Opr opr) {
  Unimplemented();
}

void LIR_Assembler::pop(LIR_Opr opr) {
  Unimplemented();
}

//-------------------------------------------
Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Register base = addr->base()->as_pointer_register();

#ifdef AARCH64
  int align = exact_log2(type2aelembytes(addr->type(), true));
#endif

  if (addr->index()->is_illegal() || addr->index()->is_constant()) {
    int offset = addr->disp();
    if (addr->index()->is_constant()) {
      offset += addr->index()->as_constant_ptr()->as_jint() << addr->scale();
    }

#ifdef AARCH64
    if (!Assembler::is_unsigned_imm_in_range(offset, 12, align) && !Assembler::is_imm_in_range(offset, 9, 0)) {
      BAILOUT_("offset not in range", Address(base));
    }
    assert(UseUnalignedAccesses || (offset & right_n_bits(align)) == 0, "offset should be aligned");
#else
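    // On 32-bit ARM, LDR/STR word immediates encode a 12-bit offset,
    // so displacements outside (-4096, 4096) must bail out.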
    if ((offset <= -4096) || (offset >= 4096)) {
      BAILOUT_("offset not in range", Address(base));
    }
#endif // AARCH64

    return Address(base, offset);

  } else {
    assert(addr->disp() == 0, "can't have both");
    int scale = addr->scale();

#ifdef AARCH64
    assert((scale == 0) || (scale == align), "scale should be zero or equal to embedded shift");

    bool is_index_extended = (addr->index()->type() == T_INT);
    if (is_index_extended) {
      assert(addr->index()->is_single_cpu(), "should be");
      return Address(base, addr->index()->as_register(), ex_sxtw, scale);
    } else {
      assert(addr->index()->is_double_cpu(), "should be");
      return Address(base, addr->index()->as_register_lo(), ex_lsl, scale);
    }
#else
    assert(addr->index()->is_single_cpu(), "should be");
    return scale >= 0 ? Address(base, addr->index()->as_register(), lsl, scale) :
                        Address(base, addr->index()->as_register(), lsr, -scale);
#endif // AARCH64
  }
}

Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
#ifdef AARCH64
  ShouldNotCallThis(); // Not used on AArch64
  return Address();
#else
  Address base = as_Address(addr);
  assert(base.index() == noreg, "must be");
  if (base.disp() + BytesPerWord >= 4096) { BAILOUT_("offset not in range", Address(base.base(), 0)); }
  return Address(base.base(), base.disp() + BytesPerWord);
#endif // AARCH64
}

Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
#ifdef AARCH64
  ShouldNotCallThis(); // Not used on AArch64
  return Address();
#else
  return as_Address(addr);
#endif // AARCH64
}


void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
  Register OSR_buf = osrBufferPointer()->as_pointer_register();

  assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
  int monitor_offset = (method()->max_locals() + 2 * (number_of_locks - 1)) * BytesPerWord;
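  // Copy the BasicObjectLock (lock word, object) pairs from the OSR buffer
  // into the corresponding monitor slots of the new frame.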
  for (int i = 0; i < number_of_locks; i++) {
    int slot_offset = monitor_offset - (i * 2 * BytesPerWord);
    __ ldr(R1, Address(OSR_buf, slot_offset + 0*BytesPerWord));
    __ ldr(R2, Address(OSR_buf, slot_offset + 1*BytesPerWord));
    __ str(R1, frame_map()->address_for_monitor_lock(i));
    __ str(R2, frame_map()->address_for_monitor_object(i));

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == NULL) {
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

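  // Materialize the current pc into LR (it identifies the deopt site within
  // this nmethod) before jumping to the deopt blob's unpack entry.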
  __ mov_relative_address(LR, __ pc());
#ifdef AARCH64
  __ raw_push(LR, LR);
  __ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, Rtemp);
#else
  __ push(LR); // stub expects LR to be saved
  __ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, noreg);
#endif // AARCH64

  assert(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::return_op(LIR_Opr result) {
  // Pop the frame before safepoint polling
  __ remove_frame(initial_frame_size_in_bytes());

  // mov_slow here is usually one or two instructions
  // TODO-AARCH64 3 instructions on AArch64, so try to load polling page by ldr_literal
  __ mov_address(Rtemp, os::get_polling_page(), symbolic_Relocation::polling_page_reference);
  __ relocate(relocInfo::poll_return_type);
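  // The dummy load below faults when the polling page is armed (protected),
  // which diverts execution to the safepoint handler.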
  __ ldr(Rtemp, Address(Rtemp));
  __ ret();
}


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  __ mov_address(Rtemp, os::get_polling_page(), symbolic_Relocation::polling_page_reference);
  if (info != NULL) {
    add_debug_info_for_branch(info);
  }
  int offset = __ offset();
  __ relocate(relocInfo::poll_type);
  __ ldr(Rtemp, Address(Rtemp));
  return offset;
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) {
    __ mov(to_reg, from_reg);
  }
}

void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant() && dest->is_register(), "must be");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_ADDRESS:
    case T_INT:
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov_slow(dest->as_register(), c->as_jint());
      break;

    case T_LONG:
      assert(patch_code == lir_patch_none, "no patching handled here");
#ifdef AARCH64
      __ mov_slow(dest->as_pointer_register(), (intptr_t)c->as_jlong());
#else
      __ mov_slow(dest->as_register_lo(), c->as_jint_lo());
      __ mov_slow(dest->as_register_hi(), c->as_jint_hi());
#endif // AARCH64
      break;

    case T_OBJECT:
      if (patch_code == lir_patch_none) {
        __ mov_oop(dest->as_register(), c->as_jobject());
      } else {
        jobject2reg_with_patching(dest->as_register(), info);
      }
      break;

    case T_METADATA:
      if (patch_code == lir_patch_none) {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      } else {
        klass2reg_with_patching(dest->as_register(), info);
      }
      break;

    case T_FLOAT:
      if (dest->is_single_fpu()) {
        __ mov_float(dest->as_float_reg(), c->as_jfloat());
      } else {
#ifdef AARCH64
        ShouldNotReachHere();
#else
        // Simple getters can return float constant directly into r0
        __ mov_slow(dest->as_register(), c->as_jint_bits());
#endif // AARCH64
      }
      break;

    case T_DOUBLE:
      if (dest->is_double_fpu()) {
        __ mov_double(dest->as_double_reg(), c->as_jdouble());
      } else {
#ifdef AARCH64
        ShouldNotReachHere();
#else
        // Simple getters can return double constant directly into r1r0
        __ mov_slow(dest->as_register_lo(), c->as_jint_lo_bits());
        __ mov_slow(dest->as_register_hi(), c->as_jint_hi_bits());
#endif // AARCH64
      }
      break;

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "must be");
  assert(dest->is_stack(), "must be");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: // fall through
    case T_FLOAT:
      __ mov_slow(Rtemp, c->as_jint_bits());
      __ str_32(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_ADDRESS:
      __ mov_slow(Rtemp, c->as_jint());
      __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_OBJECT:
      __ mov_oop(Rtemp, c->as_jobject());
      __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_LONG: // fall through
    case T_DOUBLE:
#ifdef AARCH64
      __ mov_slow(Rtemp, c->as_jlong_bits());
      __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix()));
#else
      __ mov_slow(Rtemp, c->as_jint_lo_bits());
      __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes));
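      // Reload Rtemp only if the high word differs from the low word
      // (e.g., the constants 0 and -1 need just one mov_slow).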
      if (c->as_jint_hi_bits() != c->as_jint_lo_bits()) {
        __ mov_slow(Rtemp, c->as_jint_hi_bits());
      }
      __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
#endif // AARCH64
      break;

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
                              CodeEmitInfo* info, bool wide) {
#ifdef AARCH64
  assert((src->as_constant_ptr()->type() == T_OBJECT && src->as_constant_ptr()->as_jobject() == NULL) ||
         (src->as_constant_ptr()->type() == T_INT && src->as_constant_ptr()->as_jint() == 0) ||
         (src->as_constant_ptr()->type() == T_LONG && src->as_constant_ptr()->as_jlong() == 0) ||
         (src->as_constant_ptr()->type() == T_FLOAT && src->as_constant_ptr()->as_jint_bits() == 0) ||
         (src->as_constant_ptr()->type() == T_DOUBLE && src->as_constant_ptr()->as_jlong_bits() == 0),
         "cannot handle otherwise");
  assert(dest->as_address_ptr()->type() == type, "should be");

  Address addr = as_Address(dest->as_address_ptr());
  int null_check_offset = code_offset();
  switch (type) {
    case T_OBJECT: // fall through
    case T_ARRAY:
      if (UseCompressedOops && !wide) {
        __ str_w(ZR, addr);
      } else {
        __ str(ZR, addr);
      }
      break;
    case T_ADDRESS: // fall through
    case T_DOUBLE:  // fall through
    case T_LONG:    __ str(ZR, addr);   break;
    case T_FLOAT:   // fall through
    case T_INT:     __ str_w(ZR, addr); break;
    case T_BOOLEAN: // fall through
    case T_BYTE:    __ strb(ZR, addr);  break;
    case T_CHAR:    // fall through
    case T_SHORT:   __ strh(ZR, addr);  break;
    default: ShouldNotReachHere();
  }
#else
  assert((src->as_constant_ptr()->type() == T_OBJECT && src->as_constant_ptr()->as_jobject() == NULL), "cannot handle otherwise");
  __ mov(Rtemp, 0);

  int null_check_offset = code_offset();
  __ str(Rtemp, as_Address(dest->as_address_ptr()));
#endif // AARCH64

  if (info != NULL) {
#ifndef AARCH64
    assert(false, "arm32 didn't support this before, investigate if bug");
#endif
    add_debug_info_for_null_check(null_check_offset, info);
  }
}

void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register() && dest->is_register(), "must be");

  if (src->is_single_cpu()) {
    if (dest->is_single_cpu()) {
      move_regs(src->as_register(), dest->as_register());
#ifdef AARCH64
    } else if (dest->is_double_cpu()) {
      assert((src->type() == T_OBJECT) || (src->type() == T_ARRAY) || (src->type() == T_ADDRESS), "invalid src type");
      move_regs(src->as_register(), dest->as_register_lo());
#else
    } else if (dest->is_single_fpu()) {
      __ fmsr(dest->as_float_reg(), src->as_register());
#endif // AARCH64
    } else {
      ShouldNotReachHere();
    }
  } else if (src->is_double_cpu()) {
#ifdef AARCH64
    move_regs(src->as_register_lo(), dest->as_register_lo());
#else
    if (dest->is_double_cpu()) {
      __ long_move(dest->as_register_lo(), dest->as_register_hi(), src->as_register_lo(), src->as_register_hi());
    } else {
      __ fmdrr(dest->as_double_reg(), src->as_register_lo(), src->as_register_hi());
    }
#endif // AARCH64
  } else if (src->is_single_fpu()) {
    if (dest->is_single_fpu()) {
      __ mov_float(dest->as_float_reg(), src->as_float_reg());
    } else if (dest->is_single_cpu()) {
      __ mov_fpr2gpr_float(dest->as_register(), src->as_float_reg());
    } else {
      ShouldNotReachHere();
    }
  } else if (src->is_double_fpu()) {
    if (dest->is_double_fpu()) {
      __ mov_double(dest->as_double_reg(), src->as_double_reg());
    } else if (dest->is_double_cpu()) {
#ifdef AARCH64
      __ fmov_xd(dest->as_register_lo(), src->as_double_reg());
#else
      __ fmrrd(dest->as_register_lo(), dest->as_register_hi(), src->as_double_reg());
#endif // AARCH64
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  Address addr = dest->is_single_word() ?
                 frame_map()->address_for_slot(dest->single_stack_ix()) :
                 frame_map()->address_for_slot(dest->double_stack_ix());

#ifndef AARCH64
  assert(lo_word_offset_in_bytes == 0 && hi_word_offset_in_bytes == 4, "little endian");
  if (src->is_single_fpu() || src->is_double_fpu()) {
    if (addr.disp() >= 1024) { BAILOUT("Too exotic case to handle here"); }
  }
#endif // !AARCH64

  if (src->is_single_cpu()) {
    switch (type) {
      case T_OBJECT:
      case T_ARRAY:    __ verify_oop(src->as_register());   // fall through
      case T_ADDRESS:
      case T_METADATA: __ str(src->as_register(), addr);    break;
      case T_FLOAT:    // used in intBitsToFloat intrinsic implementation, fall through
      case T_INT:      __ str_32(src->as_register(), addr); break;
      default:
        ShouldNotReachHere();
    }
  } else if (src->is_double_cpu()) {
    __ str(src->as_register_lo(), addr);
#ifndef AARCH64
    __ str(src->as_register_hi(), frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
#endif // !AARCH64
  } else if (src->is_single_fpu()) {
    __ str_float(src->as_float_reg(), addr);
  } else if (src->is_double_fpu()) {
    __ str_double(src->as_double_reg(), addr);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info,
                            bool pop_fpu_stack, bool wide,
                            bool unaligned) {
  LIR_Address* to_addr = dest->as_address_ptr();
  Register base_reg = to_addr->base()->as_pointer_register();
  const bool needs_patching = (patch_code != lir_patch_none);

  PatchingStub* patch = NULL;
  if (needs_patching) {
#ifdef AARCH64
    // Use the same alignment for the reg2mem code as for the PatchingStub code,
    // so that the copied bind_literal() code stays properly aligned.
    __ align(wordSize);
#endif
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
#ifdef AARCH64
    // Extra nop for MT safe patching
    __ nop();
#endif // AARCH64
  }

  int null_check_offset = code_offset();

  switch (type) {
    case T_ARRAY:
    case T_OBJECT:
      if (UseCompressedOops && !wide) {
#ifdef AARCH64
        const Register temp_src = Rtemp;
        assert_different_registers(temp_src, src->as_register());
        __ encode_heap_oop(temp_src, src->as_register());
        null_check_offset = code_offset();
        __ str_32(temp_src, as_Address(to_addr));
#else
        ShouldNotReachHere();
#endif // AARCH64
      } else {
        __ str(src->as_register(), as_Address(to_addr));
      }
      break;

    case T_ADDRESS:
#ifdef AARCH64
    case T_LONG:
#endif // AARCH64
      __ str(src->as_pointer_register(), as_Address(to_addr));
      break;

    case T_BYTE:
    case T_BOOLEAN:
      __ strb(src->as_register(), as_Address(to_addr));
      break;

    case T_CHAR:
    case T_SHORT:
      __ strh(src->as_register(), as_Address(to_addr));
      break;

    case T_INT:
#ifdef __SOFTFP__
    case T_FLOAT:
#endif // __SOFTFP__
      __ str_32(src->as_register(), as_Address(to_addr));
      break;

#ifdef AARCH64

    case T_FLOAT:
      __ str_s(src->as_float_reg(), as_Address(to_addr));
      break;

    case T_DOUBLE:
      __ str_d(src->as_double_reg(), as_Address(to_addr));
      break;

#else // AARCH64

#ifdef __SOFTFP__
    case T_DOUBLE:
#endif // __SOFTFP__
    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
      if (to_addr->index()->is_register()) {
        assert(to_addr->scale() == LIR_Address::times_1, "Unexpected scaled register");
        assert(to_addr->disp() == 0, "Not yet supporting both");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        base_reg = Rtemp;
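        // The two words are stored with separate patching stubs: the first str
        // is finished as lir_patch_low, the second as lir_patch_high.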
        __ str(from_lo, Address(Rtemp));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ str(from_hi, Address(Rtemp, BytesPerWord));
      } else if (base_reg == from_lo) {
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        if ((to_addr->disp() <= -4096) || (to_addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fsts(src->as_float_reg(), Address(Rtemp, to_addr->disp()));
      } else {
        __ fsts(src->as_float_reg(), as_Address(to_addr));
      }
      break;

    case T_DOUBLE:
      if (to_addr->index()->is_register()) {
        assert(to_addr->scale() == LIR_Address::times_1, "Unexpected scaled register");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        if ((to_addr->disp() <= -4096) || (to_addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fstd(src->as_double_reg(), Address(Rtemp, to_addr->disp()));
      } else {
        __ fstd(src->as_double_reg(), as_Address(to_addr));
      }
      break;
#endif // __SOFTFP__

#endif // AARCH64

    default:
      ShouldNotReachHere();
  }

  if (info != NULL) {
    add_debug_info_for_null_check(null_check_offset, info);
  }

  if (patch != NULL) {
    // The offset embedded in the LDR/STR instruction may be insufficient to
    // address the field, so leave space for one more instruction that can
    // handle larger offsets.
    __ nop();
    patching_epilog(patch, patch_code, base_reg, info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  Address addr = src->is_single_word() ?
                 frame_map()->address_for_slot(src->single_stack_ix()) :
                 frame_map()->address_for_slot(src->double_stack_ix());

#ifndef AARCH64
  assert(lo_word_offset_in_bytes == 0 && hi_word_offset_in_bytes == 4, "little endian");
  if (dest->is_single_fpu() || dest->is_double_fpu()) {
    if (addr.disp() >= 1024) { BAILOUT("Too exotic case to handle here"); }
  }
#endif // !AARCH64

  if (dest->is_single_cpu()) {
    switch (type) {
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA: __ ldr(dest->as_register(), addr); break;
      case T_FLOAT: // used in floatToRawIntBits intrinsic implementation
      case T_INT:   __ ldr_u32(dest->as_register(), addr); break;
      default:
        ShouldNotReachHere();
    }
    if ((type == T_OBJECT) || (type == T_ARRAY)) {
      __ verify_oop(dest->as_register());
    }
  } else if (dest->is_double_cpu()) {
    __ ldr(dest->as_register_lo(), addr);
#ifndef AARCH64
    __ ldr(dest->as_register_hi(), frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes));
#endif // !AARCH64
  } else if (dest->is_single_fpu()) {
    __ ldr_float(dest->as_float_reg(), addr);
  } else if (dest->is_double_fpu()) {
    __ ldr_double(dest->as_double_reg(), addr);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    switch (src->type()) {
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA:
        __ ldr(Rtemp, frame_map()->address_for_slot(src->single_stack_ix()));
        __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
        break;

      case T_INT:
      case T_FLOAT:
        __ ldr_u32(Rtemp, frame_map()->address_for_slot(src->single_stack_ix()));
        __ str_32(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
        break;

      default:
        ShouldNotReachHere();
    }
  } else {
    assert(src->is_double_stack(), "must be");
    __ ldr(Rtemp, frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes));
    __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes));
#ifdef AARCH64
    assert(lo_word_offset_in_bytes == 0, "adjust this code");
#else
    __ ldr(Rtemp, frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes));
    __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
#endif // AARCH64
  }
}


void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info,
                            bool wide, bool unaligned) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Address* addr = src->as_address_ptr();

  Register base_reg = addr->base()->as_pointer_register();

  PatchingStub* patch = NULL;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
#ifdef AARCH64
    // Extra nop for MT safe patching
    __ nop();
#endif // AARCH64
  }
  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_OBJECT: // fall through
    case T_ARRAY:
      if (UseCompressedOops && !wide) {
        __ ldr_u32(dest->as_register(), as_Address(addr));
      } else {
        __ ldr(dest->as_register(), as_Address(addr));
      }
      break;

    case T_ADDRESS:
      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
        __ ldr_u32(dest->as_pointer_register(), as_Address(addr));
      } else {
        __ ldr(dest->as_pointer_register(), as_Address(addr));
      }
      break;

#ifdef AARCH64
    case T_LONG:
#else
    case T_INT:
#ifdef __SOFTFP__
    case T_FLOAT:
#endif // __SOFTFP__
#endif // AARCH64
      __ ldr(dest->as_pointer_register(), as_Address(addr));
      break;

    case T_BOOLEAN:
      __ ldrb(dest->as_register(), as_Address(addr));
      break;

    case T_BYTE:
      __ ldrsb(dest->as_register(), as_Address(addr));
      break;

    case T_CHAR:
      __ ldrh(dest->as_register(), as_Address(addr));
      break;

    case T_SHORT:
      __ ldrsh(dest->as_register(), as_Address(addr));
      break;

#ifdef AARCH64

    case T_INT:
      __ ldr_w(dest->as_register(), as_Address(addr));
      break;

    case T_FLOAT:
      __ ldr_s(dest->as_float_reg(), as_Address(addr));
      break;

    case T_DOUBLE:
      __ ldr_d(dest->as_double_reg(), as_Address(addr));
      break;

#else // AARCH64

#ifdef __SOFTFP__
    case T_DOUBLE:
#endif // __SOFTFP__
    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
      if (addr->index()->is_register()) {
        assert(addr->scale() == LIR_Address::times_1, "Unexpected scaled register");
        assert(addr->disp() == 0, "Not yet supporting both");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        base_reg = Rtemp;
        __ ldr(to_lo, Address(Rtemp));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ ldr(to_hi, Address(Rtemp, BytesPerWord));
      } else if (base_reg == to_lo) {
        __ add(Rtemp, base_reg, addr->index()->as_register());
        if ((addr->disp() <= -4096) || (addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ flds(dest->as_float_reg(), Address(Rtemp, addr->disp()));
      } else {
        __ flds(dest->as_float_reg(), as_Address(addr));
      }
      break;

    case T_DOUBLE:
      if (addr->index()->is_register()) {
        assert(addr->scale() == LIR_Address::times_1, "Unexpected scaled register");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        if ((addr->disp() <= -4096) || (addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fldd(dest->as_double_reg(), Address(Rtemp, addr->disp()));
      } else {
        __ fldd(dest->as_double_reg(), as_Address(addr));
      }
      break;
#endif // __SOFTFP__

#endif // AARCH64

    default:
      ShouldNotReachHere();
  }

  if (patch != NULL) {
    // The offset embedded in the LDR/STR instruction may be insufficient to
    // address the field, so leave space for one more instruction that can
    // handle larger offsets.
    __ nop();
    patching_epilog(patch, patch_code, base_reg, info);
  }

#ifdef AARCH64
  switch (type) {
    case T_ARRAY:
    case T_OBJECT:
      if (UseCompressedOops && !wide) {
        __ decode_heap_oop(dest->as_register());
      }
      __ verify_oop(dest->as_register());
      break;

    case T_ADDRESS:
      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
        __ decode_klass_not_null(dest->as_register());
      }
      break;
  }
#endif // AARCH64
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  bool is_32 = op->result_opr()->is_single_cpu();

  if (op->code() == lir_idiv && op->in_opr2()->is_constant() && is_32) {
    int c = op->in_opr2()->as_constant_ptr()->as_jint();
    assert(is_power_of_2(c), "non power-of-2 constant should be put in a register");

    Register left = op->in_opr1()->as_register();
    Register dest = op->result_opr()->as_register();
    if (c == 1) {
      __ mov(dest, left);
    } else if (c == 2) {
      __ add_32(dest, left, AsmOperand(left, lsr, 31));
      __ asr_32(dest, dest, 1);
    } else if (c != (int) 0x80000000) {
      int power = log2_intptr(c);
      __ asr_32(Rtemp, left, 31);
      __ add_32(dest, left, AsmOperand(Rtemp, lsr, 32-power)); // dest = left + (left < 0 ? 2^power - 1 : 0);
      __ asr_32(dest, dest, power); // dest = dest >> power;
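      // e.g., with c == 8 (power == 3) and left == -5: Rtemp == -1,
      // dest == -5 + 7 == 2, then 2 >> 3 == 0, i.e. -5/8 rounded toward zero.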
    } else {
      // x/0x80000000 is a special case, since the divisor is a power of two, but is negative.
      // The only possible result values are 0 and 1, with 1 only for dividend == divisor == 0x80000000.
      __ cmp_32(left, c);
#ifdef AARCH64
      __ cset(dest, eq);
#else
      __ mov(dest, 0, ne);
      __ mov(dest, 1, eq);
#endif // AARCH64
    }
  } else {
#ifdef AARCH64
    Register left  = op->in_opr1()->as_pointer_register();
    Register right = op->in_opr2()->as_pointer_register();
    Register dest  = op->result_opr()->as_pointer_register();

    switch (op->code()) {
      case lir_idiv:
        if (is_32) {
          __ sdiv_w(dest, left, right);
        } else {
          __ sdiv(dest, left, right);
        }
        break;
      case lir_irem: {
        Register tmp = op->in_opr3()->as_pointer_register();
        assert_different_registers(left, tmp);
        assert_different_registers(right, tmp);
        if (is_32) {
          __ sdiv_w(tmp, left, right);
          __ msub_w(dest, right, tmp, left);
        } else {
          __ sdiv(tmp, left, right);
          __ msub(dest, right, tmp, left);
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }
#else
    assert(op->code() == lir_idiv || op->code() == lir_irem, "unexpected op3");
    __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::runtime_call_type);
    add_debug_info_for_div0_here(op->info());
#endif // AARCH64
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
  assert(op->info() == NULL, "CodeEmitInfo?");
#endif // ASSERT

#ifdef __SOFTFP__
  assert(op->code() != lir_cond_float_branch, "this should be impossible");
#else
  if (op->code() == lir_cond_float_branch) {
#ifndef AARCH64
    __ fmstat();
#endif // !AARCH64
    __ b(*(op->ublock()->label()), vs);
  }
#endif // __SOFTFP__

  AsmCondition acond = al;
  switch (op->cond()) {
    case lir_cond_equal:        acond = eq; break;
    case lir_cond_notEqual:     acond = ne; break;
    case lir_cond_less:         acond = lt; break;
    case lir_cond_lessEqual:    acond = le; break;
    case lir_cond_greaterEqual: acond = ge; break;
    case lir_cond_greater:      acond = gt; break;
    case lir_cond_aboveEqual:   acond = hs; break;
    case lir_cond_belowEqual:   acond = ls; break;
    default: assert(op->cond() == lir_cond_always, "must be");
  }
  __ b(*(op->label()), acond);
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
#ifdef AARCH64
      __ sign_extend(dest->as_register_lo(), src->as_register(), 32);
#else
      move_regs(src->as_register(), dest->as_register_lo());
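      // the high word is the sign extension of the low word (arithmetic shift by 31)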
      __ mov(dest->as_register_hi(), AsmOperand(src->as_register(), asr, 31));
#endif // AARCH64
      break;
    case Bytecodes::_l2i:
      move_regs(src->as_register_lo(), dest->as_register());
      break;
    case Bytecodes::_i2b:
      __ sign_extend(dest->as_register(), src->as_register(), 8);
      break;
    case Bytecodes::_i2s:
      __ sign_extend(dest->as_register(), src->as_register(), 16);
      break;
    case Bytecodes::_i2c:
      __ zero_extend(dest->as_register(), src->as_register(), 16);
      break;
    case Bytecodes::_f2d:
      __ convert_f2d(dest->as_double_reg(), src->as_float_reg());
      break;
    case Bytecodes::_d2f:
      __ convert_d2f(dest->as_float_reg(), src->as_double_reg());
      break;
    case Bytecodes::_i2f:
#ifdef AARCH64
      __ scvtf_sw(dest->as_float_reg(), src->as_register());
#else
      __ fmsr(Stemp, src->as_register());
      __ fsitos(dest->as_float_reg(), Stemp);
#endif // AARCH64
      break;
    case Bytecodes::_i2d:
#ifdef AARCH64
      __ scvtf_dw(dest->as_double_reg(), src->as_register());
#else
      __ fmsr(Stemp, src->as_register());
      __ fsitod(dest->as_double_reg(), Stemp);
#endif // AARCH64
      break;
    case Bytecodes::_f2i:
#ifdef AARCH64
      __ fcvtzs_ws(dest->as_register(), src->as_float_reg());
#else
      __ ftosizs(Stemp, src->as_float_reg());
      __ fmrs(dest->as_register(), Stemp);
#endif // AARCH64
      break;
    case Bytecodes::_d2i:
#ifdef AARCH64
      __ fcvtzs_wd(dest->as_register(), src->as_double_reg());
#else
      __ ftosizd(Stemp, src->as_double_reg());
      __ fmrs(dest->as_register(), Stemp);
#endif // AARCH64
      break;
#ifdef AARCH64
    case Bytecodes::_l2f:
      __ scvtf_sx(dest->as_float_reg(), src->as_register_lo());
      break;
    case Bytecodes::_l2d:
      __ scvtf_dx(dest->as_double_reg(), src->as_register_lo());
      break;
    case Bytecodes::_f2l:
      __ fcvtzs_xs(dest->as_register_lo(), src->as_float_reg());
      break;
    case Bytecodes::_d2l:
      __ fcvtzs_xd(dest->as_register_lo(), src->as_double_reg());
      break;
#endif // AARCH64
    default:
      ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    Register tmp = op->tmp1()->as_register();
    __ ldrb(tmp, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
    add_debug_info_for_null_check_here(op->stub()->info());
    __ cmp(tmp, InstanceKlass::fully_initialized);
    __ b(*op->stub()->entry(), ne);
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->tmp3()->as_register(),
                     op->header_size(),
                     op->object_size(),
  md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  data = md->bci_to_data(bci);
  assert(data != NULL, "need data for checkcast");
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  if (md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes() >= 4096) {
    // The offset is large, so bias the mdo by the base of the slot so that the
    // ldr can use an immediate offset to reference the data slots.
    mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
  }
}

// On 32-bit ARM, code before this helper should test obj for null (the Z flag should be set if obj is null).
void LIR_Assembler::typecheck_profile_helper1(ciMethod* method, int bci,
                                              ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias,
                                              Register obj, Register mdo, Register data_val, Label* obj_is_null) {
  assert(method != NULL, "Should have method");
  assert_different_registers(obj, mdo, data_val);
  setup_md_access(method, bci, md, data, mdo_offset_bias);
  Label not_null;
#ifdef AARCH64
  __ cbnz(obj, not_null);
#else
  __ b(not_null, ne);
#endif // AARCH64
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(data_val, mdo_offset_bias);
    __ add(mdo, mdo, data_val);
  }
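  // Record the null: set the null_seen bit in the DataLayout flags byte of the MDO.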
  Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
  __ ldrb(data_val, flags_addr);
  __ orr(data_val, data_val, (uint)BitData::null_seen_byte_constant());
  __ strb(data_val, flags_addr);
  __ b(*obj_is_null);
  __ bind(not_null);
}

void LIR_Assembler::typecheck_profile_helper2(ciMethodData* md, ciProfileData* data, int mdo_offset_bias,
                                              Register mdo, Register recv, Register value, Register tmp1,
                                              Label* profile_cast_success, Label* profile_cast_failure,
                                              Label* success, Label* failure) {
  assert_different_registers(mdo, value, tmp1);
  __ bind(*profile_cast_success);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(tmp1, mdo_offset_bias);
    __ add(mdo, mdo, tmp1);
  }
  __ load_klass(recv, value);
  type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
  __ b(*success);
  // Cast failure case
  __ bind(*profile_cast_failure);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(tmp1, mdo_offset_bias);
    __ add(mdo, mdo, tmp1);
  }
  Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
  __ ldr(tmp1, data_addr);
  __ sub(tmp1, tmp1, DataLayout::counter_increment);
  __ str(tmp1, data_addr);
  __ b(*failure);
}

// Sets `res` to 1 if `cond` holds; on AArch64 it also sets `res` to 0 if `cond` does not hold.
static void set_instanceof_result(MacroAssembler* _masm, Register res, AsmCondition cond) {
#ifdef AARCH64
  __ cset(res, cond);
#else
  __ mov(res, 1, cond);
#endif // AARCH64
}


void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  // TODO: ARM - can be more effective with one more register
  switch (op->code()) {
    case lir_store_check: {
      CodeStub* stub = op->stub();
      Register value = op->object()->as_register();
      Register array = op->array()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      assert_different_registers(klass_RInfo, k_RInfo, Rtemp);
      if (op->should_profile()) {
        assert_different_registers(value, klass_RInfo, k_RInfo, Rtemp);
      }

      // check if it needs to be profiled
      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;
      Label profile_cast_success, profile_cast_failure, done;
      Label *success_target = op->should_profile() ? &profile_cast_success : &done;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

      if (op->should_profile()) {
#ifndef AARCH64
        __ cmp(value, 0);
#endif // !AARCH64
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, value, k_RInfo, Rtemp, &done);
      } else {
        __ cbz(value, done);
      }
      assert_different_registers(k_RInfo, value);
      add_debug_info_for_null_check_here(op->info_for_exception());
      __ load_klass(k_RInfo, array);
      __ load_klass(klass_RInfo, value);
      __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
      __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
      // check for immediate positive hit
      __ ldr(Rtemp, Address(klass_RInfo, Rtemp));
      __ cmp(klass_RInfo, k_RInfo);
      __ cond_cmp(Rtemp, k_RInfo, ne);
      __ b(*success_target, eq);
      // check for immediate negative hit
      __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
      __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
      __ b(*failure_target, ne);
      // slow case
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      ciKlass* k = op->klass();
      assert_different_registers(res, k_RInfo, klass_RInfo, Rtemp);

      if (stub->is_simple_exception_stub()) {
        // TODO: ARM - Late binding is used to prevent confusion of the register allocator
        assert(stub->is_exception_throw_stub(), "must be");
        ((SimpleExceptionStub*)stub)->set_obj(op->result_opr());
      }
      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;

      Label done;

      Label profile_cast_failure, profile_cast_success;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : op->stub()->entry();
      Label *success_target = op->should_profile() ? &profile_cast_success : &done;

#ifdef AARCH64
      move_regs(obj, res);
      if (op->should_profile()) {
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
      } else {
        __ cbz(obj, done);
      }
      if (k->is_loaded()) {
        __ mov_metadata(k_RInfo, k->constant_encoding());
      } else {
        if (res != obj) {
          op->info_for_patch()->add_register_oop(FrameMap::as_oop_opr(res));
        }
        klass2reg_with_patching(k_RInfo, op->info_for_patch());
      }
      __ load_klass(klass_RInfo, res);

      if (op->fast_check()) {
        __ cmp(klass_RInfo, k_RInfo);
        __ b(*failure_target, ne);
      } else if (k->is_loaded()) {
        __ ldr(Rtemp, Address(klass_RInfo, k->super_check_offset()));
        if (in_bytes(Klass::secondary_super_cache_offset()) != (int) k->super_check_offset()) {
          __ cmp(Rtemp, k_RInfo);
          __ b(*failure_target, ne);
        } else {
          __ cmp(klass_RInfo, k_RInfo);
          __ cond_cmp(Rtemp, k_RInfo, ne);
          __ b(*success_target, eq);
          assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
          __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
          __ cbz(R0, *failure_target);
        }
      } else {
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        // check for immediate positive hit
        __ ldr(Rtemp, Address(klass_RInfo, Rtemp));
        __ cmp(klass_RInfo, k_RInfo);
        __ cond_cmp(Rtemp, k_RInfo, ne);
        __ b(*success_target, eq);
        // check for immediate negative hit
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
        __ b(*failure_target, ne);
        // slow case
        assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
        __ cbz(R0, *failure_target);
      }

#else // AARCH64

      __ movs(res, obj);
      if (op->should_profile()) {
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
      } else {
        __ b(done, eq);
      }
      if (k->is_loaded()) {
        __ mov_metadata(k_RInfo, k->constant_encoding());
      } else if (k_RInfo != obj) {
        klass2reg_with_patching(k_RInfo, op->info_for_patch());
        __ movs(res, obj);
      } else {
        // Patching doesn't update "res" register after GC, so do patching first
        klass2reg_with_patching(Rtemp, op->info_for_patch());
        __ movs(res, obj);
        __ mov(k_RInfo, Rtemp);
      }
      __ load_klass(klass_RInfo, res, ne);

          __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
          __ cbz(R0, *failure_target);
        }
      } else {
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        __ b(*success_target, eq);
        // check for immediate positive hit
        __ ldr(Rtemp, Address(klass_RInfo, Rtemp));
        __ cmp(klass_RInfo, k_RInfo);
        __ cmp(Rtemp, k_RInfo, ne);
        __ b(*success_target, eq);
        // check for immediate negative hit
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
        __ b(*failure_target, ne);
        // slow case
        assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
        __ cbz(R0, *failure_target);
      }
#endif // AARCH64

      if (op->should_profile()) {
        Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, res, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done, stub->entry());
      }
      __ bind(done);
      break;
    }

    case lir_instanceof: {
      Register obj = op->object()->as_register();
      Register res = op->result_opr()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      ciKlass* k = op->klass();
      assert_different_registers(res, klass_RInfo, k_RInfo, Rtemp);

      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;

      Label done;

      Label profile_cast_failure, profile_cast_success;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : &done;
      Label *success_target = op->should_profile() ? &profile_cast_success : &done;

#ifdef AARCH64
      move_regs(obj, res);
#else
      __ movs(res, obj);
#endif // AARCH64

      if (op->should_profile()) {
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
      } else {
#ifdef AARCH64
        __ cbz(obj, done); // If obj == NULL, res is false
#else
        __ b(done, eq);
#endif // AARCH64
      }

      if (k->is_loaded()) {
        __ mov_metadata(k_RInfo, k->constant_encoding());
      } else {
        op->info_for_patch()->add_register_oop(FrameMap::as_oop_opr(res));
        klass2reg_with_patching(k_RInfo, op->info_for_patch());
      }
      __ load_klass(klass_RInfo, res);

#ifndef AARCH64
      if (!op->should_profile()) {
        __ mov(res, 0);
      }
#endif // !AARCH64

      if (op->fast_check()) {
        __ cmp(klass_RInfo, k_RInfo);
        if (!op->should_profile()) {
          set_instanceof_result(_masm, res, eq);
        } else {
          __ b(profile_cast_failure, ne);
        }
      } else if (k->is_loaded()) {
        __ ldr(Rtemp, Address(klass_RInfo, k->super_check_offset()));
        if (in_bytes(Klass::secondary_super_cache_offset()) != (int) k->super_check_offset()) {
          __ cmp(Rtemp, k_RInfo);
          if (!op->should_profile()) {
            set_instanceof_result(_masm, res, eq);
          } else {
            __ b(profile_cast_failure, ne);
          }
        } else {
          __ cmp(klass_RInfo, k_RInfo);
          __ cond_cmp(Rtemp, k_RInfo, ne);
          if (!op->should_profile()) {
            set_instanceof_result(_masm, res, eq);
          }
          __ b(*success_target, eq);
          assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
          __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
          if (!op->should_profile()) {
            move_regs(R0, res);
          } else {
            __ cbz(R0, *failure_target);
          }
        }
      } else {
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        // check for immediate positive hit
        __ cmp(klass_RInfo, k_RInfo);
        if (!op->should_profile()) {
#ifdef AARCH64
          // TODO-AARCH64 check if separate conditional branch is more efficient than ldr+cond_cmp
          __ ldr(res, Address(klass_RInfo, Rtemp));
#else
          __ ldr(res, Address(klass_RInfo, Rtemp), ne);
#endif // AARCH64
          __ cond_cmp(res, k_RInfo, ne);
          set_instanceof_result(_masm, res, eq);
        } else {
#ifdef AARCH64
          // TODO-AARCH64 check if separate conditional branch is more efficient than ldr+cond_cmp
          __ ldr(Rtemp, Address(klass_RInfo, Rtemp));
#else
          __ ldr(Rtemp, Address(klass_RInfo, Rtemp), ne);
#endif // AARCH64
          __ cond_cmp(Rtemp, k_RInfo, ne);
        }
        __ b(*success_target, eq);
        // check for immediate negative hit
        if (op->should_profile()) {
          __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        }
        __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
        if (!op->should_profile()) {
#ifdef AARCH64
          __ mov(res, 0);
#else
          __ mov(res, 0, ne);
#endif // AARCH64
        }
        __ b(*failure_target, ne);
        // slow case
        assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
        if (!op->should_profile()) {
          move_regs(R0, res);
        }
        if (op->should_profile()) {
          __ cbz(R0, *failure_target);
        }
      }

      if (op->should_profile()) {
        Label done_ok, done_failure;
        Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, res, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done_ok, &done_failure);
        __ bind(done_failure);
        __ mov(res, 0);
        __ b(done);
        __ bind(done_ok);
        __ mov(res, 1);
      }
      __ bind(done);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  // if (*addr == cmpval) {
  //   *addr = newval;
  //   dest = 1;
  // } else {
  //   dest = 0;
  // }
#ifdef AARCH64
  Label retry, done;
  Register addr = op->addr()->as_pointer_register();
  Register cmpval = op->cmp_value()->as_pointer_register();
  Register newval = op->new_value()->as_pointer_register();
  Register dest = op->result_opr()->as_pointer_register();
  assert_different_registers(dest, addr, cmpval, newval, Rtemp);

  if (UseCompressedOops && op->code() == lir_cas_obj) {
    Register tmp1 = op->tmp1()->as_pointer_register();
    Register tmp2 = op->tmp2()->as_pointer_register();
    assert_different_registers(dest, addr, cmpval, newval, tmp1, tmp2, Rtemp);
    __ encode_heap_oop(tmp1, cmpval); cmpval = tmp1;
    __ encode_heap_oop(tmp2, newval); newval = tmp2;
  }

  __ mov(dest, ZR);
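  // LL/SC retry loop: the load-acquire/store-release exclusive pair is retried
  // while the store-exclusive fails (status in Rtemp is non-zero).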
  __ bind(retry);
  if (((op->code() == lir_cas_obj) && !UseCompressedOops) || op->code() == lir_cas_long) {
    __ ldaxr(Rtemp, addr);
    __ cmp(Rtemp, cmpval);
    __ b(done, ne);
    __ stlxr(Rtemp, newval, addr);
  } else if (((op->code() == lir_cas_obj) && UseCompressedOops) || op->code() == lir_cas_int) {
    __ ldaxr_w(Rtemp, addr);
    __ cmp_w(Rtemp, cmpval);
    __ b(done, ne);
    __ stlxr_w(Rtemp, newval, addr);
  } else {
    ShouldNotReachHere();
  }
  __ cbnz_w(Rtemp, retry);
  __ mov(dest, 1);
  __ bind(done);
#else
  // FIXME: membar_release
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
  Register addr = op->addr()->is_register() ?
                  op->addr()->as_pointer_register() :
                  op->addr()->as_address_ptr()->base()->as_pointer_register();
  assert(op->addr()->is_register() || op->addr()->as_address_ptr()->disp() == 0, "unexpected disp");
  assert(op->addr()->is_register() || op->addr()->as_address_ptr()->index() == LIR_OprDesc::illegalOpr(), "unexpected index");
  if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    Register cmpval = op->cmp_value()->as_register();
    Register newval = op->new_value()->as_register();
    Register dest = op->result_opr()->as_register();
    assert_different_registers(dest, addr, cmpval, newval, Rtemp);

    __ atomic_cas_bool(cmpval, newval, addr, 0, Rtemp); // Rtemp free by default at C1 LIR layer
    __ mov(dest, 1, eq);
    __ mov(dest, 0, ne);
  } else if (op->code() == lir_cas_long) {
    assert(VM_Version::supports_cx8(), "wrong machine");
    Register cmp_value_lo = op->cmp_value()->as_register_lo();
    Register cmp_value_hi = op->cmp_value()->as_register_hi();
    Register new_value_lo = op->new_value()->as_register_lo();
    Register new_value_hi = op->new_value()->as_register_hi();
    Register dest = op->result_opr()->as_register();
    Register tmp_lo = op->tmp1()->as_register_lo();
    Register tmp_hi = op->tmp1()->as_register_hi();

    assert_different_registers(tmp_lo, tmp_hi, cmp_value_lo, cmp_value_hi, dest, new_value_lo, new_value_hi, addr);
    assert(tmp_hi->encoding() == tmp_lo->encoding() + 1, "non aligned register pair");
    assert(new_value_hi->encoding() == new_value_lo->encoding() + 1, "non aligned register pair");
    assert((tmp_lo->encoding() & 0x1) == 0, "misaligned register pair");
    assert((new_value_lo->encoding() & 0x1) == 0, "misaligned register pair");
    __ atomic_cas64(tmp_lo, tmp_hi, dest, cmp_value_lo, cmp_value_hi,
                    new_value_lo, new_value_hi, addr, 0);
  } else {
    Unimplemented();
  }
#endif // AARCH64
  // FIXME: is full membar really needed instead of just membar_acquire?
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp);
}


void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  AsmCondition acond = al;
  AsmCondition ncond = nv;
  if (opr1 != opr2) {
    switch (condition) {
      case lir_cond_equal:        acond = eq; ncond = ne; break;
      case lir_cond_notEqual:     acond = ne; ncond = eq; break;
      case lir_cond_less:         acond = lt; ncond = ge; break;
      case lir_cond_lessEqual:    acond = le; ncond = gt; break;
      case lir_cond_greaterEqual: acond = ge; ncond = lt; break;
      case lir_cond_greater:      acond = gt; ncond = le; break;
      case lir_cond_aboveEqual:   acond = hs; ncond = lo; break;
      case lir_cond_belowEqual:   acond = ls; ncond = hi; break;
      default: ShouldNotReachHere();
    }
  }

#ifdef AARCH64

  // TODO-AARCH64 implement it more efficiently

  if (opr1->is_register()) {
    reg2reg(opr1, result);
  } else if (opr1->is_stack()) {
    stack2reg(opr1, result, result->type());
  } else if (opr1->is_constant()) {
    const2reg(opr1, result, lir_patch_none, NULL);
  } else {
    ShouldNotReachHere();
  }

  Label skip;
  __ b(skip, acond);

  if (opr2->is_register()) {
    reg2reg(opr2, result);
  } else if (opr2->is_stack()) {
    stack2reg(opr2, result, result->type());
  } else if (opr2->is_constant()) {
    const2reg(opr2, result, lir_patch_none, NULL);
  } else {
    ShouldNotReachHere();
  }

  __ bind(skip);

#else
  for (;;) { // two iterations only
    if (opr1 == result) {
      // do nothing
    } else if (opr1->is_single_cpu()) {
      __ mov(result->as_register(), opr1->as_register(), acond);
    } else if (opr1->is_double_cpu()) {
      __ long_move(result->as_register_lo(), result->as_register_hi(),
                   opr1->as_register_lo(), opr1->as_register_hi(), acond);
    } else if (opr1->is_single_stack()) {
      __ ldr(result->as_register(), frame_map()->address_for_slot(opr1->single_stack_ix()), acond);
    } else if (opr1->is_double_stack()) {
      __ ldr(result->as_register_lo(),
             frame_map()->address_for_slot(opr1->double_stack_ix(), lo_word_offset_in_bytes), acond);
      __ ldr(result->as_register_hi(),
             frame_map()->address_for_slot(opr1->double_stack_ix(), hi_word_offset_in_bytes), acond);
    } else if (opr1->is_illegal()) {
      // do nothing: this part of the cmove has been optimized away in the peephole optimizer
    } else {
      assert(opr1->is_constant(), "must be");
      LIR_Const* c = opr1->as_constant_ptr();
          break;
        case T_DOUBLE:
#ifdef __SOFTFP__
          // not generated now.
          __ mov_slow(result->as_register_lo(), c->as_jint_lo(), acond);
          __ mov_slow(result->as_register_hi(), c->as_jint_hi(), acond);
#else
          __ mov_double(result->as_double_reg(), c->as_jdouble(), acond);
#endif // __SOFTFP__
          break;
        default:
          ShouldNotReachHere();
      }
    }

    // Negate the condition and repeat the algorithm with the second operand
    if (opr1 == opr2) { break; }
    opr1 = opr2;
    acond = ncond;
  }
#endif // AARCH64
}

#if defined(AARCH64) || defined(ASSERT)
static int reg_size(LIR_Opr op) {
  switch (op->type()) {
    case T_FLOAT:
    case T_INT:      return BytesPerInt;
    case T_LONG:
    case T_DOUBLE:   return BytesPerLong;
    case T_OBJECT:
    case T_ARRAY:
    case T_METADATA: return BytesPerWord;
    case T_ADDRESS:
    case T_ILLEGAL:  // fall through
    default: ShouldNotReachHere(); return -1;
  }
}
#endif

void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "unused on this code path");
  assert(dest->is_register(), "wrong items state");

  if (right->is_address()) {
    // special case for adding shifted/extended register
    const Register res = dest->as_pointer_register();
    const Register lreg = left->as_pointer_register();
    const LIR_Address* addr = right->as_address_ptr();

    assert(addr->base()->as_pointer_register() == lreg && addr->index()->is_register() && addr->disp() == 0, "must be");

    int scale = addr->scale();
    AsmShift shift = lsl;

#ifdef AARCH64
    bool is_index_extended = reg_size(addr->base()) > reg_size(addr->index());
    if (scale < 0) {
      scale = -scale;
      shift = lsr;
    }
    assert(shift == lsl || !is_index_extended, "could not have extend and right shift in one operand");
    assert(0 <= scale && scale <= 63, "scale is too large");

    if (is_index_extended) {
      assert(scale <= 4, "scale is too large for add with extended register");
      assert(addr->index()->is_single_cpu(), "should be");
      assert(addr->index()->type() == T_INT, "should be");
      assert(dest->is_double_cpu(), "should be");
      assert(code == lir_add, "special case of add with extended register");

      __ add(res, lreg, addr->index()->as_register(), ex_sxtw, scale);
      return;
    } else if (reg_size(dest) == BytesPerInt) {
      assert(reg_size(addr->base()) == reg_size(addr->index()), "should be");
      assert(reg_size(addr->base()) == reg_size(dest), "should be");

      AsmOperand operand(addr->index()->as_pointer_register(), shift, scale);
      switch (code) {
        case lir_add: __ add_32(res, lreg, operand); break;
        case lir_sub: __ sub_32(res, lreg, operand); break;
        default: ShouldNotReachHere();
      }
      return;
    }
#endif // AARCH64

    assert(reg_size(addr->base()) == reg_size(addr->index()), "should be");
    assert(reg_size(addr->base()) == reg_size(dest), "should be");
    assert(reg_size(dest) == wordSize, "should be");

    AsmOperand operand(addr->index()->as_pointer_register(), shift, scale);
    switch (code) {
      case lir_add: __ add(res, lreg, operand); break;
      case lir_sub: __ sub(res, lreg, operand); break;
      default: ShouldNotReachHere();
    }

#ifndef AARCH64
  } else if (left->is_address()) {
    assert(code == lir_sub && right->is_single_cpu(), "special case used by strength_reduce_multiply()");
    const LIR_Address* addr = left->as_address_ptr();
    const Register res = dest->as_register();
    const Register rreg = right->as_register();
    assert(addr->base()->as_register() == rreg && addr->index()->is_register() && addr->disp() == 0, "must be");
    __ rsb(res, rreg, AsmOperand(addr->index()->as_register(), lsl, addr->scale()));
#endif // !AARCH64

  } else if (dest->is_single_cpu()) {
    assert(left->is_single_cpu(), "unexpected left operand");
#ifdef AARCH64
    assert(dest->type() == T_INT, "unexpected dest type");
    assert(left->type() == T_INT, "unexpected left type");
    assert(right->type() == T_INT, "unexpected right type");
#endif // AARCH64

    const Register res = dest->as_register();
    const Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      const Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ add_32(res, lreg, rreg); break;
        case lir_sub: __ sub_32(res, lreg, rreg); break;
        case lir_mul: __ mul_32(res, lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert(right->is_constant(), "must be");
      const jint c = right->as_constant_ptr()->as_jint();
      if (!Assembler::is_arith_imm_in_range(c)) {
        BAILOUT("illegal arithmetic operand");
      }
      switch (code) {
        case lir_add: __ add_32(res, lreg, c); break;
        case lir_sub: __ sub_32(res, lreg, c); break;
        default: ShouldNotReachHere();
      }
    }

  } else if (dest->is_double_cpu()) {
#ifdef AARCH64
    assert(left->is_double_cpu() ||
           (left->is_single_cpu() && ((left->type() == T_OBJECT) || (left->type() == T_ARRAY) || (left->type() == T_ADDRESS))),
           "unexpected left operand");

    const Register res = dest->as_register_lo();
    const Register lreg = left->as_pointer_register();

    if (right->is_constant()) {
      assert(right->type() == T_LONG, "unexpected right type");
      assert((right->as_constant_ptr()->as_jlong() >> 24) == 0, "out of range");
      jint imm = (jint)right->as_constant_ptr()->as_jlong();
      switch (code) {
        case lir_add: __ add(res, lreg, imm); break;
        case lir_sub: __ sub(res, lreg, imm); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert(right->is_double_cpu() ||
             (right->is_single_cpu() && ((right->type() == T_OBJECT) || (right->type() == T_ARRAY) || (right->type() == T_ADDRESS))),
             "unexpected right operand");
      const Register rreg = right->as_pointer_register();
      switch (code) {
        case lir_add: __ add(res, lreg, rreg); break;
        case lir_sub: __ sub(res, lreg, rreg); break;
        case lir_mul: __ mul(res, lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    }
#else // AARCH64
    Register res_lo = dest->as_register_lo();
    Register res_hi = dest->as_register_hi();
    Register lreg_lo = left->as_register_lo();
    Register lreg_hi = left->as_register_hi();
    if (right->is_double_cpu()) {
      Register rreg_lo = right->as_register_lo();
      Register rreg_hi = right->as_register_hi();
2085 if (res_lo == lreg_hi || res_lo == rreg_hi) {
2086 res_lo = Rtemp;
2087 }
2088 switch (code) {
2089 case lir_add:
2090 __ adds(res_lo, lreg_lo, rreg_lo);
2091 __ adc(res_hi, lreg_hi, rreg_hi);
2092 break;
2093 case lir_sub:
2094 __ subs(res_lo, lreg_lo, rreg_lo);
2095 __ sbc(res_hi, lreg_hi, rreg_hi);
2096 break;
2096 default:
2097 ShouldNotReachHere();
2098 }
2099
2100 } else {
2101 assert(right->is_constant(), "must be");
2102 assert((right->as_constant_ptr()->as_jlong() >> 32) == 0, "out of range");
2103 const jint c = (jint) right->as_constant_ptr()->as_jlong();
2104 if (res_lo == lreg_hi) {
2105 res_lo = Rtemp;
2106 }
2107 switch (code) {
2108 case lir_add:
2109 __ adds(res_lo, lreg_lo, c);
2110 __ adc(res_hi, lreg_hi, 0);
2111 break;
2112 case lir_sub:
2113 __ subs(res_lo, lreg_lo, c);
2114 __ sbc(res_hi, lreg_hi, 0);
2115 break;
2116 default:
2117 ShouldNotReachHere();
2118 }
2119 }
2120 move_regs(res_lo, dest->as_register_lo());
2121 #endif // AARCH64
2122
2123 } else if (dest->is_single_fpu()) {
2124 assert(left->is_single_fpu(), "must be");
2125 assert(right->is_single_fpu(), "must be");
2126 const FloatRegister res = dest->as_float_reg();
2127 const FloatRegister lreg = left->as_float_reg();
2128 const FloatRegister rreg = right->as_float_reg();
2129 switch (code) {
2130 case lir_add: __ add_float(res, lreg, rreg); break;
2131 case lir_sub: __ sub_float(res, lreg, rreg); break;
2132 case lir_mul_strictfp: // fall through
2133 case lir_mul: __ mul_float(res, lreg, rreg); break;
2134 case lir_div_strictfp: // fall through
2135 case lir_div: __ div_float(res, lreg, rreg); break;
2136 default: ShouldNotReachHere();
2137 }
2138 } else if (dest->is_double_fpu()) {
2139 assert(left->is_double_fpu(), "must be");
2140 assert(right->is_double_fpu(), "must be");
2141 const FloatRegister res = dest->as_double_reg();
2142 const FloatRegister lreg = left->as_double_reg();
2143 const FloatRegister rreg = right->as_double_reg();
2144 switch (code) {
2145 case lir_add: __ add_double(res, lreg, rreg); break;
2146 case lir_sub: __ sub_double(res, lreg, rreg); break;
2147 case lir_mul_strictfp: // fall through
2148 case lir_mul: __ mul_double(res, lreg, rreg); break;
2149 case lir_div_strictfp: // fall through
2150 case lir_div: __ div_double(res, lreg, rreg); break;
2151 default: ShouldNotReachHere();
2152 }
2153 } else {
2154 ShouldNotReachHere();
2155 }
2156 }
2157
2158
2159 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
2160 switch (code) {
2161 case lir_abs:
2162 __ abs_double(dest->as_double_reg(), value->as_double_reg());
2163 break;
2164 case lir_sqrt:
2165 __ sqrt_double(dest->as_double_reg(), value->as_double_reg());
2166 break;
2167 default:
2168 ShouldNotReachHere();
2169 }
2170 }
2171
2172
2173 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
2174 assert(dest->is_register(), "wrong items state");
2175 assert(left->is_register(), "wrong items state");
2176
2177 if (dest->is_single_cpu()) {
2178 #ifdef AARCH64
2179 assert (dest->type() == T_INT, "unexpected result type");
2180 assert (left->type() == T_INT, "unexpected left type");
2181 assert (right->type() == T_INT, "unexpected right type");
2182 #endif // AARCH64
2183
2184 const Register res = dest->as_register();
2185 const Register lreg = left->as_register();
2186
2187 if (right->is_single_cpu()) {
2188 const Register rreg = right->as_register();
2189 switch (code) {
2190 case lir_logic_and: __ and_32(res, lreg, rreg); break;
2191 case lir_logic_or: __ orr_32(res, lreg, rreg); break;
2192 case lir_logic_xor: __ eor_32(res, lreg, rreg); break;
2193 default: ShouldNotReachHere();
2194 }
2195 } else {
2196 assert(right->is_constant(), "must be");
2197 const uint c = (uint)right->as_constant_ptr()->as_jint();
2198 switch (code) {
2199 case lir_logic_and: __ and_32(res, lreg, c); break;
2200 case lir_logic_or: __ orr_32(res, lreg, c); break;
2201 case lir_logic_xor: __ eor_32(res, lreg, c); break;
2202 default: ShouldNotReachHere();
2203 }
2204 }
2205 } else {
2206 assert(dest->is_double_cpu(), "should be");
2207 Register res_lo = dest->as_register_lo();
2208
2209 #ifdef AARCH64
2210 assert ((left->is_single_cpu() && left->is_oop_register()) || left->is_double_cpu(), "should be");
2211 const Register lreg_lo = left->as_pointer_register();
2212 #else
2213 assert (dest->type() == T_LONG, "unexpected result type");
2214 assert (left->type() == T_LONG, "unexpected left type");
2215 assert (right->type() == T_LONG, "unexpected right type");
2216
2217 const Register res_hi = dest->as_register_hi();
2218 const Register lreg_lo = left->as_register_lo();
2219 const Register lreg_hi = left->as_register_hi();
2220 #endif // AARCH64
2221
2222 if (right->is_register()) {
2223 #ifdef AARCH64
2224 assert ((right->is_single_cpu() && right->is_oop_register()) || right->is_double_cpu(), "should be");
2225 const Register rreg_lo = right->as_pointer_register();
2226 switch (code) {
2227 case lir_logic_and: __ andr(res_lo, lreg_lo, rreg_lo); break;
2228 case lir_logic_or: __ orr (res_lo, lreg_lo, rreg_lo); break;
2229 case lir_logic_xor: __ eor (res_lo, lreg_lo, rreg_lo); break;
2230 default: ShouldNotReachHere();
2231 }
2232 #else
2233 const Register rreg_lo = right->as_register_lo();
2234 const Register rreg_hi = right->as_register_hi();
2235 if (res_lo == lreg_hi || res_lo == rreg_hi) {
2236 res_lo = Rtemp; // Temp register helps to avoid overlap between result and input
2237 }
2238 switch (code) {
2239 case lir_logic_and:
2240 __ andr(res_lo, lreg_lo, rreg_lo);
2241 __ andr(res_hi, lreg_hi, rreg_hi);
2242 break;
2243 case lir_logic_or:
2244 __ orr(res_lo, lreg_lo, rreg_lo);
2245 __ orr(res_hi, lreg_hi, rreg_hi);
2246 break;
2247 case lir_logic_xor:
2248 __ eor(res_lo, lreg_lo, rreg_lo);
2249 __ eor(res_hi, lreg_hi, rreg_hi);
2250 break;
2251 default:
2252 ShouldNotReachHere();
2253 }
2254 move_regs(res_lo, dest->as_register_lo());
2255 #endif // AARCH64
2256 } else {
2257 assert(right->is_constant(), "must be");
2258 #ifdef AARCH64
2259 const julong c = (julong)right->as_constant_ptr()->as_jlong();
2260 Assembler::LogicalImmediate imm(c, false);
2261 if (imm.is_encoded()) {
2262 switch (code) {
2263 case lir_logic_and: __ andr(res_lo, lreg_lo, imm); break;
2264 case lir_logic_or: __ orr (res_lo, lreg_lo, imm); break;
2265 case lir_logic_xor: __ eor (res_lo, lreg_lo, imm); break;
2266 default: ShouldNotReachHere();
2267 }
2268 } else {
2269 BAILOUT("64 bit constant cannot be inlined");
2270 }
2271 #else
2272 const jint c_lo = (jint) right->as_constant_ptr()->as_jlong();
2273 const jint c_hi = (jint) (right->as_constant_ptr()->as_jlong() >> 32);
2274 // Case for logic_or from do_ClassIDIntrinsic()
2275 if (c_hi == 0 && AsmOperand::is_rotated_imm(c_lo)) {
2276 switch (code) {
2277 case lir_logic_and:
2278 __ andr(res_lo, lreg_lo, c_lo);
2279 __ mov(res_hi, 0);
2280 break;
2281 case lir_logic_or:
2282 __ orr(res_lo, lreg_lo, c_lo);
2283 break;
2284 case lir_logic_xor:
2285 __ eor(res_lo, lreg_lo, c_lo);
2286 break;
2287 default:
2288 ShouldNotReachHere();
2289 }
2290 } else if (code == lir_logic_and &&
2291 c_hi == -1 &&
2292 (AsmOperand::is_rotated_imm(c_lo) ||
2293 AsmOperand::is_rotated_imm(~c_lo))) {
2294 // Another case which handles logic_and from do_ClassIDIntrinsic()
2295 if (AsmOperand::is_rotated_imm(c_lo)) {
2296 __ andr(res_lo, lreg_lo, c_lo);
2297 } else {
2298 __ bic(res_lo, lreg_lo, ~c_lo);
2299 }
2300 if (res_hi != lreg_hi) {
2301 __ mov(res_hi, lreg_hi);
2302 }
2303 } else {
2304 BAILOUT("64 bit constant cannot be inlined");
2305 }
2306 #endif // AARCH64
2307 }
2308 }
2309 }
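// Note on is_rotated_imm above: an ARM data-processing immediate is an 8-bit
// value rotated right by an even amount, so masks such as 0xFF, 0x3FC or
// 0xFF000000 are encodable while e.g. 0x101 is not; a 64-bit constant is
// inlined only if its usable half fits this form (or its bic complement),
// otherwise the compilation bails out.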
2310
2311
2312 #ifdef AARCH64
2313
2314 void LIR_Assembler::long_compare_helper(LIR_Opr opr1, LIR_Opr opr2) {
2315 assert(opr1->is_double_cpu(), "should be");
2316 Register x = opr1->as_register_lo();
2317
2318 if (opr2->is_double_cpu()) {
2319 Register y = opr2->as_register_lo();
2320 __ cmp(x, y);
2321
2322 } else {
2323 assert(opr2->is_constant(), "should be");
2324 assert(opr2->as_constant_ptr()->type() == T_LONG, "long constant expected");
2325 jlong c = opr2->as_jlong();
2326 assert(((c >> 31) == 0) || ((c >> 31) == -1), "immediate is out of range");
2327 if (c >= 0) {
2328 __ cmp(x, (jint)c);
2329 } else {
2330 __ cmn(x, (jint)(-c));
2331 }
2332 }
2333 }
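// Example: for a constant opr2 of -5 the helper emits "cmn x, #5", since
// x + 5 sets the same flags as x - (-5) and negative immediates are not
// directly encodable in cmp.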
2334
2335 #endif // AARCH64
2336
2337 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
2338 if (opr1->is_single_cpu()) {
2339 if (opr2->is_constant()) {
2340 switch (opr2->as_constant_ptr()->type()) {
2341 case T_INT: {
2342 const jint c = opr2->as_constant_ptr()->as_jint();
2343 if (Assembler::is_arith_imm_in_range(c)) {
2344 __ cmp_32(opr1->as_register(), c);
2345 } else if (Assembler::is_arith_imm_in_range(-c)) {
2346 __ cmn_32(opr1->as_register(), -c);
2347 } else {
2348 // This can happen when compiling lookupswitch
2349 __ mov_slow(Rtemp, c);
2350 __ cmp_32(opr1->as_register(), Rtemp);
2351 }
2352 break;
2353 }
2354 case T_OBJECT:
2355 assert(opr2->as_constant_ptr()->as_jobject() == NULL, "cannot handle otherwise");
2356 __ cmp(opr1->as_register(), 0);
2357 break;
2358 default:
2359 ShouldNotReachHere();
2360 }
2361 } else if (opr2->is_single_cpu()) {
2362 if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY || opr1->type() == T_METADATA || opr1->type() == T_ADDRESS) {
2363 assert(opr2->type() == T_OBJECT || opr2->type() == T_ARRAY || opr2->type() == T_METADATA || opr2->type() == T_ADDRESS, "incompatible type");
2364 __ cmp(opr1->as_register(), opr2->as_register());
2365 } else {
2366 assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY && opr2->type() != T_METADATA && opr2->type() != T_ADDRESS, "incompatible type");
2367 __ cmp_32(opr1->as_register(), opr2->as_register());
2368 }
2369 } else {
2370 ShouldNotReachHere();
2371 }
2372 } else if (opr1->is_double_cpu()) {
2373 #ifdef AARCH64
2374 long_compare_helper(opr1, opr2);
2375 #else
2376 Register xlo = opr1->as_register_lo();
2377 Register xhi = opr1->as_register_hi();
2378 if (opr2->is_constant() && opr2->as_jlong() == 0) {
2379 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "cannot handle otherwise");
2380 __ orrs(Rtemp, xlo, xhi);
2381 } else if (opr2->is_register()) {
2382 Register ylo = opr2->as_register_lo();
2383 Register yhi = opr2->as_register_hi();
2384 if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
2385 __ teq(xhi, yhi);
2386 __ teq(xlo, ylo, eq);
2387 } else {
2388 __ subs(xlo, xlo, ylo);
2389 __ sbcs(xhi, xhi, yhi);
2390 }
2391 } else {
2392 ShouldNotReachHere();
2393 }
2394 #endif // AARCH64
2395 } else if (opr1->is_single_fpu()) {
2396 if (opr2->is_constant()) {
2397 assert(opr2->as_jfloat() == 0.0f, "cannot handle otherwise");
2398 __ cmp_zero_float(opr1->as_float_reg());
2399 } else {
2400 __ cmp_float(opr1->as_float_reg(), opr2->as_float_reg());
2401 }
2402 } else if (opr1->is_double_fpu()) {
2403 if (opr2->is_constant()) {
2404 assert(opr2->as_jdouble() == 0.0, "cannot handle otherwise");
2405 __ cmp_zero_double(opr1->as_double_reg());
2406 } else {
2407 __ cmp_double(opr1->as_double_reg(), opr2->as_double_reg());
2408 }
2409 } else {
2410 ShouldNotReachHere();
2411 }
2412 }
2413
2414 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
2415 const Register res = dst->as_register();
2416 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2417 comp_op(lir_cond_unknown, left, right, op);
2418 #ifdef AARCH64
2419 if (code == lir_ucmp_fd2i) { // unordered is less
2420 __ cset(res, gt); // 1 if '>', else 0
2421 __ csinv(res, res, ZR, ge); // previous value if '>=', else -1
2422 } else {
2423 __ cset(res, hi); // 1 if '>' or unordered, else 0
2424 __ csinv(res, res, ZR, pl); // previous value if '>=' or unordered, else -1
2425 }
2426 #else
2427 __ fmstat();
2428 if (code == lir_ucmp_fd2i) { // unordered is less
2429 __ mvn(res, 0, lt);
2430 __ mov(res, 1, ge);
2431 } else { // unordered is greater
2432 __ mov(res, 1, cs);
2433 __ mvn(res, 0, cc);
2434 }
2435 __ mov(res, 0, eq);
2436 #endif // AARCH64
2437
2438 } else {
2439 assert(code == lir_cmp_l2i, "must be");
2440
2441 #ifdef AARCH64
2442 long_compare_helper(left, right);
2443
2444 __ cset(res, gt); // 1 if '>', else 0
2445 __ csinv(res, res, ZR, ge); // previous value if '>=', else -1
2446 #else
2447 Label done;
2448 const Register xlo = left->as_register_lo();
2449 const Register xhi = left->as_register_hi();
2450 const Register ylo = right->as_register_lo();
2451 const Register yhi = right->as_register_hi();
2452 __ cmp(xhi, yhi);
2453 __ mov(res, 1, gt);
2454 __ mvn(res, 0, lt);
2455 __ b(done, ne);
2456 __ subs(res, xlo, ylo);
2457 __ mov(res, 1, hi);
2458 __ mvn(res, 0, lo);
2459 __ bind(done);
2460 #endif // AARCH64
2461 }
2462 }
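// The result follows the Java comparison convention: -1, 0 or 1 for less,
// equal, greater. lir_ucmp_fd2i corresponds to fcmpl/dcmpl, where an
// unordered (NaN) comparison produces -1; lir_cmp_fd2i corresponds to
// fcmpg/dcmpg, where it produces 1.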
2463
2464
2465 void LIR_Assembler::align_call(LIR_Code code) {
2466 // Not needed
2467 }
2468
2469
2470 void LIR_Assembler::call(LIR_OpJavaCall *op, relocInfo::relocType rtype) {
2471 int ret_addr_offset = __ patchable_call(op->addr(), rtype);
2472 assert(ret_addr_offset == __ offset(), "embedded return address not allowed");
2473 add_call_info_here(op->info());
2474 }
2475
2476
2477 void LIR_Assembler::ic_call(LIR_OpJavaCall *op) {
2478 bool near_range = __ cache_fully_reachable();
2479 address oop_address = pc();
2480
2481 bool use_movw = AARCH64_ONLY(false) NOT_AARCH64(VM_Version::supports_movw());
2482
2483 // Ricklass may contain something that is not a metadata pointer, so
2484 // mov_metadata can't be used
2485 InlinedAddress value((address)Universe::non_oop_word());
2486 InlinedAddress addr(op->addr());
2487 if (use_movw) {
2488 #ifdef AARCH64
2489 ShouldNotReachHere();
2490 #else
2491 __ movw(Ricklass, ((unsigned int)Universe::non_oop_word()) & 0xffff);
2492 __ movt(Ricklass, ((unsigned int)Universe::non_oop_word()) >> 16);
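// movw zero-extends a 16-bit immediate into the low halfword and movt fills
// the high halfword, so the pair materializes an arbitrary 32-bit constant
// inline.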
2493 #endif // AARCH64
2494 } else {
2495 // No movw/movt: we must load a PC-relative value, but there is no
2496 // relocation and hence no metadata table to load from.
2497 // Use a b instruction rather than a bl, inline the constant after the
2498 // branch, use a PC-relative ldr to load the constant, and arrange for
2499 // the call to return after the constant(s).
2500 __ ldr_literal(Ricklass, value);
2501 }
2502 __ relocate(virtual_call_Relocation::spec(oop_address));
2503 if (near_range && use_movw) {
2504 __ bl(op->addr());
2505 } else {
2506 Label call_return;
2507 __ adr(LR, call_return);
2508 if (near_range) {
2509 __ b(op->addr());
2510 } else {
2511 __ indirect_jump(addr, Rtemp);
2512 __ bind_literal(addr);
2513 }
2565 assert(exceptionOop->as_register() == Rexception_obj, "must match");
2566 assert(exceptionPC->as_register() == Rexception_pc, "must match");
2567 info->add_register_oop(exceptionOop);
2568
2569 Runtime1::StubID handle_id = compilation()->has_fpu_code() ?
2570 Runtime1::handle_exception_id :
2571 Runtime1::handle_exception_nofpu_id;
2572 Label return_address;
2573 __ adr(Rexception_pc, return_address);
2574 __ call(Runtime1::entry_for(handle_id), relocInfo::runtime_call_type);
2575 __ bind(return_address);
2576 add_call_info_here(info); // for exception handler
2577 }
2578
2579 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
2580 assert(exceptionOop->as_register() == Rexception_obj, "must match");
2581 __ b(_unwind_handler_entry);
2582 }
2583
2584 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2585 #ifdef AARCH64
2586 if (dest->is_single_cpu()) {
2587 Register res = dest->as_register();
2588 Register x = left->as_register();
2589 Register y = count->as_register();
2590 assert (dest->type() == T_INT, "unexpected result type");
2591 assert (left->type() == T_INT, "unexpected left type");
2592
2593 switch (code) {
2594 case lir_shl: __ lslv_w(res, x, y); break;
2595 case lir_shr: __ asrv_w(res, x, y); break;
2596 case lir_ushr: __ lsrv_w(res, x, y); break;
2597 default: ShouldNotReachHere();
2598 }
2599 } else if (dest->is_double_cpu()) {
2600 Register res = dest->as_register_lo();
2601 Register x = left->as_register_lo();
2602 Register y = count->as_register();
2603
2604 switch (code) {
2605 case lir_shl: __ lslv(res, x, y); break;
2606 case lir_shr: __ asrv(res, x, y); break;
2607 case lir_ushr: __ lsrv(res, x, y); break;
2608 default: ShouldNotReachHere();
2609 }
2610 } else {
2611 ShouldNotReachHere();
2612 }
2613 #else
2614 AsmShift shift = lsl;
2615 switch (code) {
2616 case lir_shl: shift = lsl; break;
2617 case lir_shr: shift = asr; break;
2618 case lir_ushr: shift = lsr; break;
2619 default: ShouldNotReachHere();
2620 }
2621
2622 if (dest->is_single_cpu()) {
2623 __ andr(Rtemp, count->as_register(), 31);
2624 __ mov(dest->as_register(), AsmOperand(left->as_register(), shift, Rtemp));
2625 } else if (dest->is_double_cpu()) {
2626 Register dest_lo = dest->as_register_lo();
2627 Register dest_hi = dest->as_register_hi();
2628 Register src_lo = left->as_register_lo();
2629 Register src_hi = left->as_register_hi();
2630 Register Rcount = count->as_register();
2631 // Resolve possible register conflicts
2632 if (shift == lsl && dest_hi == src_lo) {
2633 dest_hi = Rtemp;
2634 } else if (shift != lsl && dest_lo == src_hi) {
2635 dest_lo = Rtemp;
2636 } else if (dest_lo == src_lo && dest_hi == src_hi) {
2637 dest_lo = Rtemp;
2638 } else if (dest_lo == Rcount || dest_hi == Rcount) {
2639 Rcount = Rtemp;
2640 }
2641 __ andr(Rcount, count->as_register(), 63);
2642 __ long_shift(dest_lo, dest_hi, src_lo, src_hi, shift, Rcount);
2643 move_regs(dest_lo, dest->as_register_lo());
2644 move_regs(dest_hi, dest->as_register_hi());
2645 } else {
2646 ShouldNotReachHere();
2647 }
2648 #endif // AARCH64
2649 }
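// The shift count is explicitly masked (& 31 for 32-bit values, & 63 for
// 64-bit values) to match Java shift semantics, e.g. (x << 33) on an int
// behaves as (x << 1).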
2650
2651
2652 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2653 #ifdef AARCH64
2654 if (dest->is_single_cpu()) {
2655 assert (dest->type() == T_INT, "unexpected result type");
2656 assert (left->type() == T_INT, "unexpected left type");
2657 count &= 31;
2658 if (count != 0) {
2659 switch (code) {
2660 case lir_shl: __ _lsl_w(dest->as_register(), left->as_register(), count); break;
2661 case lir_shr: __ _asr_w(dest->as_register(), left->as_register(), count); break;
2662 case lir_ushr: __ _lsr_w(dest->as_register(), left->as_register(), count); break;
2663 default: ShouldNotReachHere();
2664 }
2665 } else {
2666 move_regs(left->as_register(), dest->as_register());
2667 }
2668 } else if (dest->is_double_cpu()) {
2669 count &= 63;
2670 if (count != 0) {
2671 switch (code) {
2672 case lir_shl: __ _lsl(dest->as_register_lo(), left->as_register_lo(), count); break;
2673 case lir_shr: __ _asr(dest->as_register_lo(), left->as_register_lo(), count); break;
2674 case lir_ushr: __ _lsr(dest->as_register_lo(), left->as_register_lo(), count); break;
2675 default: ShouldNotReachHere();
2676 }
2677 } else {
2678 move_regs(left->as_register_lo(), dest->as_register_lo());
2679 }
2680 } else {
2681 ShouldNotReachHere();
2682 }
2683
2684 #else
2685 AsmShift shift = lsl;
2686 switch (code) {
2687 case lir_shl: shift = lsl; break;
2688 case lir_shr: shift = asr; break;
2689 case lir_ushr: shift = lsr; break;
2690 default: ShouldNotReachHere();
2691 }
2692
2693 if (dest->is_single_cpu()) {
2694 count &= 31;
2695 if (count != 0) {
2696 __ mov(dest->as_register(), AsmOperand(left->as_register(), shift, count));
2697 } else {
2698 move_regs(left->as_register(), dest->as_register());
2699 }
2700 } else if (dest->is_double_cpu()) {
2701 count &= 63;
2702 if (count != 0) {
2703 Register dest_lo = dest->as_register_lo();
2704 Register dest_hi = dest->as_register_hi();
2705 Register src_lo = left->as_register_lo();
2706 Register src_hi = left->as_register_hi();
2707 // Resolve possible register conflicts
2708 if (shift == lsl && dest_hi == src_lo) {
2709 dest_hi = Rtemp;
2710 } else if (shift != lsl && dest_lo == src_hi) {
2711 dest_lo = Rtemp;
2712 }
2713 __ long_shift(dest_lo, dest_hi, src_lo, src_hi, shift, count);
2714 move_regs(dest_lo, dest->as_register_lo());
2715 move_regs(dest_hi, dest->as_register_hi());
2716 } else {
2717 __ long_move(dest->as_register_lo(), dest->as_register_hi(),
2718 left->as_register_lo(), left->as_register_hi());
2719 }
2720 } else {
2721 ShouldNotReachHere();
2722 }
2723 #endif // AARCH64
2724 }
2725
2726
2727 // Saves 4 given registers in reserved argument area.
2728 void LIR_Assembler::save_in_reserved_area(Register r1, Register r2, Register r3, Register r4) {
2729 verify_reserved_argument_area_size(4);
2730 #ifdef AARCH64
2731 __ stp(r1, r2, Address(SP, 0));
2732 __ stp(r3, r4, Address(SP, 2*wordSize));
2733 #else
2734 __ stmia(SP, RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3) | RegisterSet(r4));
2735 #endif // AARCH64
2736 }
2737
2738 // Restores 4 given registers from reserved argument area.
2739 void LIR_Assembler::restore_from_reserved_area(Register r1, Register r2, Register r3, Register r4) {
2740 #ifdef AARCH64
2741 __ ldp(r1, r2, Address(SP, 0));
2742 __ ldp(r3, r4, Address(SP, 2*wordSize));
2743 #else
2744 __ ldmia(SP, RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3) | RegisterSet(r4), no_writeback);
2745 #endif // AARCH64
2746 }
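// Both helpers assume the frame reserves at least 4 words of argument area
// at SP; e.g. save_in_reserved_area(R0, R1, R2, R3) stores the registers at
// SP[0], SP[4], SP[8] and SP[12] on 32-bit ARM.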
2747
2748
2749 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2750 ciArrayKlass* default_type = op->expected_type();
2751 Register src = op->src()->as_register();
2752 Register src_pos = op->src_pos()->as_register();
2753 Register dst = op->dst()->as_register();
2754 Register dst_pos = op->dst_pos()->as_register();
2755 Register length = op->length()->as_register();
2756 Register tmp = op->tmp()->as_register();
2757 Register tmp2 = Rtemp;
2758
2759 assert(src == R0 && src_pos == R1 && dst == R2 && dst_pos == R3, "code assumption");
2760 #ifdef AARCH64
2761 assert(length == R4, "code assumption");
2762 #endif // AARCH64
2763
2764 CodeStub* stub = op->stub();
2765
2766 int flags = op->flags();
2767 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2768 if (basic_type == T_ARRAY) basic_type = T_OBJECT;
2769
2770 // If we don't know anything or it's an object array, just go through the generic arraycopy
2771 if (default_type == NULL) {
2772
2773 // save arguments, because they will be killed by a runtime call
2774 save_in_reserved_area(R0, R1, R2, R3);
2775
2776 #ifdef AARCH64
2777 // save length argument, will be killed by a runtime call
2778 __ raw_push(length, ZR);
2779 #else
2780 // pass length argument on SP[0]
2781 __ str(length, Address(SP, -2*wordSize, pre_indexed)); // 2 words for a proper stack alignment
2782 #endif // AARCH64
2783
2784 address copyfunc_addr = StubRoutines::generic_arraycopy();
2785 assert(copyfunc_addr != NULL, "generic arraycopy stub required");
2786 #ifndef PRODUCT
2787 if (PrintC1Statistics) {
2788 __ inc_counter((address)&Runtime1::_generic_arraycopystub_cnt, tmp, tmp2);
2789 }
2790 #endif // !PRODUCT
2791 // the stub is in the code cache so close enough
2792 __ call(copyfunc_addr, relocInfo::runtime_call_type);
2793
2794 #ifdef AARCH64
2795 __ raw_pop(length, ZR);
2796 #else
2797 __ add(SP, SP, 2*wordSize);
2798 #endif // AARCH64
2799
2800 __ cbz_32(R0, *stub->continuation());
2801
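// The generic stub returns 0 on success; on a partial copy it returns the
// bitwise complement of the number of elements already copied, so mvn_32
// recovers that count and src_pos/dst_pos/length are advanced before
// taking the slow path for the remainder.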
2802 __ mvn_32(tmp, R0);
2803 restore_from_reserved_area(R0, R1, R2, R3); // load saved arguments in slow case only
2804 __ sub_32(length, length, tmp);
2805 __ add_32(src_pos, src_pos, tmp);
2806 __ add_32(dst_pos, dst_pos, tmp);
2807
2808 __ b(*stub->entry());
2809
2810 __ bind(*stub->continuation());
2811 return;
2812 }
2813
2814 assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(),
2815 "must be true at this point");
2816 int elem_size = type2aelembytes(basic_type);
2817 int shift = exact_log2(elem_size);
2818
2952 __ load_klass(tmp, src);
2953 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2954 __ load_klass(tmp, dst);
2955 }
2956 int lh_offset = in_bytes(Klass::layout_helper_offset());
2957
2958 __ ldr_u32(tmp2, Address(tmp, lh_offset));
2959
2960 jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2961 __ mov_slow(tmp, objArray_lh);
2962 __ cmp_32(tmp, tmp2);
2963 __ b(*stub->entry(), ne);
2964 }
2965
2966 save_in_reserved_area(R0, R1, R2, R3);
2967
2968 Register src_ptr = R0;
2969 Register dst_ptr = R1;
2970 Register len = R2;
2971 Register chk_off = R3;
2972 Register super_k = AARCH64_ONLY(R4) NOT_AARCH64(tmp);
2973
2974 __ add(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
2975 __ add_ptr_scaled_int32(src_ptr, src_ptr, src_pos, shift);
2976
2977 __ add(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
2978 __ add_ptr_scaled_int32(dst_ptr, dst_ptr, dst_pos, shift);
2979 __ load_klass(tmp, dst);
2980
2981 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
2982 int sco_offset = in_bytes(Klass::super_check_offset_offset());
2983
2984 #ifdef AARCH64
2985 __ raw_push(length, ZR); // Preserve length around *copyfunc_addr call
2986
2987 __ mov(len, length);
2988 __ ldr(super_k, Address(tmp, ek_offset)); // super_k == R4 == length, so this load cannot be performed earlier
2989 // TODO-AARCH64: check whether it is faster to load super klass early by using tmp and additional mov.
2990 __ ldr_u32(chk_off, Address(super_k, sco_offset));
2991 #else // AARCH64
2992 __ ldr(super_k, Address(tmp, ek_offset));
2993
2994 __ mov(len, length);
2995 __ ldr_u32(chk_off, Address(super_k, sco_offset));
2996 __ push(super_k);
2997 #endif // AARCH64
2998
2999 __ call(copyfunc_addr, relocInfo::runtime_call_type);
3000
3001 #ifndef PRODUCT
3002 if (PrintC1Statistics) {
3003 Label failed;
3004 __ cbnz_32(R0, failed);
3005 __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, tmp, tmp2);
3006 __ bind(failed);
3007 }
3008 #endif // PRODUCT
3009
3010 #ifdef AARCH64
3011 __ raw_pop(length, ZR);
3012 #else
3013 __ add(SP, SP, wordSize); // Drop super_k argument
3014 #endif // AARCH64
3015
3016 __ cbz_32(R0, *stub->continuation());
3017 __ mvn_32(tmp, R0);
3018
3019 // load saved arguments in slow case only
3020 restore_from_reserved_area(R0, R1, R2, R3);
3021
3022 __ sub_32(length, length, tmp);
3023 __ add_32(src_pos, src_pos, tmp);
3024 __ add_32(dst_pos, dst_pos, tmp);
3025
3026 #ifndef PRODUCT
3027 if (PrintC1Statistics) {
3028 __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, tmp, tmp2);
3029 }
3030 #endif
3031
3032 __ b(*stub->entry());
3033
3034 __ bind(cont);
3056 Register len = R2;
3057
3058 __ add(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
3059 __ add_ptr_scaled_int32(src_ptr, src_ptr, src_pos, shift);
3060
3061 __ add(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
3062 __ add_ptr_scaled_int32(dst_ptr, dst_ptr, dst_pos, shift);
3063
3064 __ mov(len, length);
3065
3066 __ call(entry, relocInfo::runtime_call_type);
3067
3068 __ bind(*stub->continuation());
3069 }
3070
3071 #ifdef ASSERT
3072 // emit run-time assertion
3073 void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
3074 assert(op->code() == lir_assert, "must be");
3075
3076 #ifdef AARCH64
3077 __ NOT_IMPLEMENTED();
3078 #else
3079 if (op->in_opr1()->is_valid()) {
3080 assert(op->in_opr2()->is_valid(), "both operands must be valid");
3081 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
3082 } else {
3083 assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
3084 assert(op->condition() == lir_cond_always, "no other conditions allowed");
3085 }
3086
3087 Label ok;
3088 if (op->condition() != lir_cond_always) {
3089 AsmCondition acond = al;
3090 switch (op->condition()) {
3091 case lir_cond_equal: acond = eq; break;
3092 case lir_cond_notEqual: acond = ne; break;
3093 case lir_cond_less: acond = lt; break;
3094 case lir_cond_lessEqual: acond = le; break;
3095 case lir_cond_greaterEqual: acond = ge; break;
3096 case lir_cond_greater: acond = gt; break;
3097 case lir_cond_aboveEqual: acond = hs; break;
3098 case lir_cond_belowEqual: acond = ls; break;
3099 default: ShouldNotReachHere();
3100 }
3101 __ b(ok, acond);
3102 }
3103 if (op->halt()) {
3104 const char* str = __ code_string(op->msg());
3105 __ stop(str);
3106 } else {
3107 breakpoint();
3108 }
3109 __ bind(ok);
3110 #endif // AARCH64
3111 }
3112 #endif // ASSERT
3113
3114 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
3115 fatal("CRC32 intrinsic is not implemented on this platform");
3116 }
3117
3118 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
3119 Register obj = op->obj_opr()->as_pointer_register();
3120 Register hdr = op->hdr_opr()->as_pointer_register();
3121 Register lock = op->lock_opr()->as_pointer_register();
3122 Register tmp = op->scratch_opr()->is_illegal() ? noreg :
3123 op->scratch_opr()->as_pointer_register();
3124
3125 if (!UseFastLocking) {
3126 __ b(*op->stub()->entry());
3127 } else if (op->code() == lir_lock) {
3128 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
3129 int null_check_offset = __ lock_object(hdr, obj, lock, tmp, *op->stub()->entry());
3130 if (op->info() != NULL) {
3139 }
3140
3141
3142 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
3143 ciMethod* method = op->profiled_method();
3144 int bci = op->profiled_bci();
3145 ciMethod* callee = op->profiled_callee();
3146
3147 // Update counter for all call types
3148 ciMethodData* md = method->method_data_or_null();
3149 assert(md != NULL, "Sanity");
3150 ciProfileData* data = md->bci_to_data(bci);
3151 assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
3152 assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
3153 Register mdo = op->mdo()->as_register();
3154 assert(op->tmp1()->is_register(), "tmp1 must be allocated");
3155 Register tmp1 = op->tmp1()->as_pointer_register();
3156 assert_different_registers(mdo, tmp1);
3157 __ mov_metadata(mdo, md->constant_encoding());
3158 int mdo_offset_bias = 0;
3159 int max_offset = AARCH64_ONLY(4096 << LogBytesPerWord) NOT_AARCH64(4096);
3160 if (md->byte_offset_of_slot(data, CounterData::count_offset()) + data->size_in_bytes() >= max_offset) {
3161 // The offset is large so bias the mdo by the base of the slot so
3162 // that the ldr can use an immediate offset to reference the slots of the data
3163 mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
3164 __ mov_slow(tmp1, mdo_offset_bias);
3165 __ add(mdo, mdo, tmp1);
3166 }
3167
3168 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
3169 // Perform additional virtual call profiling for invokevirtual and
3170 // invokeinterface bytecodes
3171 if (op->should_profile_receiver_type()) {
3172 assert(op->recv()->is_single_cpu(), "recv must be allocated");
3173 Register recv = op->recv()->as_register();
3174 assert_different_registers(mdo, tmp1, recv);
3175 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
3176 ciKlass* known_klass = op->known_holder();
3177 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
3178 // We know the type that will be seen at this call site; we can
3179 // statically update the MethodData* rather than needing to do
3235 __ str(tmp1, counter_addr);
3236 }
3237 }
3238
3239 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
3240 fatal("Type profiling not implemented on this platform");
3241 }
3242
3243 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
3244 Unimplemented();
3245 }
3246
3247
3248 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
3249 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
3250 __ add_slow(dst->as_pointer_register(), mon_addr.base(), mon_addr.disp());
3251 }
3252
3253
3254 void LIR_Assembler::align_backward_branch_target() {
3255 // TODO-AARCH64 review it
3256 // Some ARM processors do better with 8-byte branch target alignment
3257 __ align(8);
3258 }
3259
3260
3261 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
3262
3263 if (left->is_single_cpu()) {
3264 assert (dest->type() == T_INT, "unexpected result type");
3265 assert (left->type() == T_INT, "unexpected left type");
3266 __ neg_32(dest->as_register(), left->as_register());
3267 } else if (left->is_double_cpu()) {
3268 #ifdef AARCH64
3269 __ neg(dest->as_register_lo(), left->as_register_lo());
3270 #else
3271 Register dest_lo = dest->as_register_lo();
3272 Register dest_hi = dest->as_register_hi();
3273 Register src_lo = left->as_register_lo();
3274 Register src_hi = left->as_register_hi();
3275 if (dest_lo == src_hi) {
3276 dest_lo = Rtemp;
3277 }
3278 __ rsbs(dest_lo, src_lo, 0);
3279 __ rsc(dest_hi, src_hi, 0);
3280 move_regs(dest_lo, dest->as_register_lo());
3281 #endif // AARCH64
3282 } else if (left->is_single_fpu()) {
3283 __ neg_float(dest->as_float_reg(), left->as_float_reg());
3284 } else if (left->is_double_fpu()) {
3285 __ neg_double(dest->as_double_reg(), left->as_double_reg());
3286 } else {
3287 ShouldNotReachHere();
3288 }
3289 }
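// The two-word negation computes 0 - (hi:lo): rsbs yields the low word and
// the borrow, rsc completes the high word, e.g. negating 0x00000001_00000000
// gives 0xFFFFFFFF_00000000.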
3290
3291
3292 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
3293 assert(patch_code == lir_patch_none, "Patch code not supported");
3294 LIR_Address* addr = addr_opr->as_address_ptr();
3295 if (addr->index()->is_illegal()) {
3296 jint c = addr->disp();
3297 if (!Assembler::is_arith_imm_in_range(c)) {
3298 BAILOUT("illegal arithmetic operand");
3299 }
3300 __ add(dest->as_pointer_register(), addr->base()->as_pointer_register(), c);
3301 } else {
3302 assert(addr->disp() == 0, "cannot handle otherwise");
3303 #ifdef AARCH64
3304 assert(addr->index()->is_double_cpu(), "should be");
3305 #endif // AARCH64
3306 __ add(dest->as_pointer_register(), addr->base()->as_pointer_register(),
3307 AsmOperand(addr->index()->as_pointer_register(), lsl, addr->scale()));
3308 }
3309 }
3310
3311
3312 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
3313 assert(!tmp->is_valid(), "don't need temporary");
3314 __ call(dest);
3315 if (info != NULL) {
3316 add_call_info_here(info);
3317 }
3318 }
3319
3320
3321 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
3322 #ifdef AARCH64
3323 Unimplemented(); // TODO-AARCH64: Use stlr/ldar instructions for volatile load/store
3324 #else
3325 assert(src->is_double_cpu() && dest->is_address() ||
3326 src->is_address() && dest->is_double_cpu(),
3327 "Simple move_op is called for all other cases");
3328
3329 int null_check_offset;
3330 if (dest->is_address()) {
3331 // Store
3332 const LIR_Address* addr = dest->as_address_ptr();
3333 const Register src_lo = src->as_register_lo();
3334 const Register src_hi = src->as_register_hi();
3335 assert(addr->index()->is_illegal() && addr->disp() == 0, "The address is simple already");
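// stm stores the register set in ascending register-number order, so when
// src_hi is a lower-numbered register than src_lo the pair is routed through
// Rtemp (higher than any allocatable register) to keep the words in memory
// order.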
3336
3337 if (src_lo < src_hi) {
3338 null_check_offset = __ offset();
3339 __ stmia(addr->base()->as_register(), RegisterSet(src_lo) | RegisterSet(src_hi));
3340 } else {
3341 assert(src_lo < Rtemp, "Rtemp is higher than any allocatable register");
3342 __ mov(Rtemp, src_hi);
3343 null_check_offset = __ offset();
3344 __ stmia(addr->base()->as_register(), RegisterSet(src_lo) | RegisterSet(Rtemp));
3345 }
3346 } else {
3347 // Load
3348 const LIR_Address* addr = src->as_address_ptr();
3349 const Register dest_lo = dest->as_register_lo();
3350 const Register dest_hi = dest->as_register_hi();
3351 assert(addr->index()->is_illegal() && addr->disp() == 0, "The address is simple already");
3352
3353 null_check_offset = __ offset();
3354 if (dest_lo < dest_hi) {
3355 __ ldmia(addr->base()->as_register(), RegisterSet(dest_lo) | RegisterSet(dest_hi));
3356 } else {
3357 assert(dest_lo < Rtemp, "Rtemp is higher than any allocatable register");
3358 __ ldmia(addr->base()->as_register(), RegisterSet(dest_lo) | RegisterSet(Rtemp));
3359 __ mov(dest_hi, Rtemp);
3360 }
3361 }
3362
3363 if (info != NULL) {
3364 add_debug_info_for_null_check(null_check_offset, info);
3365 }
3366 #endif // AARCH64
3367 }
3368
3369
3370 void LIR_Assembler::membar() {
3371 __ membar(MacroAssembler::StoreLoad, Rtemp);
3372 }
3373
3374 void LIR_Assembler::membar_acquire() {
3375 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3376 }
3377
3378 void LIR_Assembler::membar_release() {
3379 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3380 }
3381
3382 void LIR_Assembler::membar_loadload() {
3383 __ membar(MacroAssembler::LoadLoad, Rtemp);
3384 }
3385
3386 void LIR_Assembler::membar_storestore() {
3387 __ membar(MacroAssembler::StoreStore, Rtemp);
3388 }
3389
3390 void LIR_Assembler::membar_loadstore() {
3391 __ membar(MacroAssembler::LoadStore, Rtemp);
3392 }
3393
3394 void LIR_Assembler::membar_storeload() {
3395 __ membar(MacroAssembler::StoreLoad, Rtemp);
3396 }
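// On ARMv7 these typically lower to DMB barriers (a store-store variant for
// StoreStore, a full barrier otherwise); older cores use the CP15 barrier,
// which is what the Rtemp scratch argument is for.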
3397
3398 void LIR_Assembler::on_spin_wait() {
3399 Unimplemented();
3400 }
3401
3402 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
3403 // Not used on ARM
3404 Unimplemented();
3405 }
3406
3407 void LIR_Assembler::peephole(LIR_List* lir) {
3408 #ifdef AARCH64
3409 return; // TODO-AARCH64 implement peephole optimizations
3410 #endif
3411 LIR_OpList* inst = lir->instructions_list();
3412 const int inst_length = inst->length();
3413 for (int i = 0; i < inst_length; i++) {
3414 LIR_Op* op = inst->at(i);
3415 switch (op->code()) {
3416 case lir_cmp: {
3417 // Replace:
3418 // cmp rX, y
3419 // cmove [EQ] y, z, rX
3420 // with
3421 // cmp rX, y
3422 // cmove [EQ] illegalOpr, z, rX
3423 //
3424 // or
3425 // cmp rX, y
3426 // cmove [NE] z, y, rX
3427 // with
3428 // cmp rX, y
3429 // cmove [NE] z, illegalOpr, rX
3430 //
3454 if (cmp_res != LIR_OprFact::illegalOpr) {
3455 LIR_Condition cond = cmove->condition();
3456 if (cond == lir_cond_equal && cmove->in_opr1() == cmp_arg) {
3457 cmove->set_in_opr1(LIR_OprFact::illegalOpr);
3458 } else if (cond == lir_cond_notEqual && cmove->in_opr2() == cmp_arg) {
3459 cmove->set_in_opr2(LIR_OprFact::illegalOpr);
3460 }
3461 }
3462 }
3463 }
3464 break;
3465 }
3466
3467 default:
3468 break;
3469 }
3470 }
3471 }
3472
3473 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
3474 #ifdef AARCH64
3475 Register ptr = src->as_pointer_register();
3476 #else
3477 assert(src->is_address(), "sanity");
3478 Address addr = as_Address(src->as_address_ptr());
3479 #endif
3480
3481 if (code == lir_xchg) {
3482 #ifdef AARCH64
3483 if (UseCompressedOops && data->is_oop()) {
3484 __ encode_heap_oop(tmp->as_pointer_register(), data->as_register());
3485 }
3486 #endif // AARCH64
3487 } else {
3488 assert (!data->is_oop(), "xadd for oops");
3489 }
3490
3491 #ifndef AARCH64
3492 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3493 #endif // !AARCH64
3494
3495 Label retry;
3496 __ bind(retry);
3497
3498 if ((data->type() == T_INT) || (data->is_oop() AARCH64_ONLY(&& UseCompressedOops))) {
3499 Register dst = dest->as_register();
3500 Register new_val = noreg;
3501 #ifdef AARCH64
3502 __ ldaxr_w(dst, ptr);
3503 #else
3504 __ ldrex(dst, addr);
3505 #endif
3506 if (code == lir_xadd) {
3507 Register tmp_reg = tmp->as_register();
3508 if (data->is_constant()) {
3509 assert_different_registers(dst, tmp_reg);
3510 __ add_32(tmp_reg, dst, data->as_constant_ptr()->as_jint());
3511 } else {
3512 assert_different_registers(dst, tmp_reg, data->as_register());
3513 __ add_32(tmp_reg, dst, data->as_register());
3514 }
3515 new_val = tmp_reg;
3516 } else {
3517 if (UseCompressedOops && data->is_oop()) {
3518 new_val = tmp->as_pointer_register();
3519 } else {
3520 new_val = data->as_register();
3521 }
3522 assert_different_registers(dst, new_val);
3523 }
3524 #ifdef AARCH64
3525 __ stlxr_w(Rtemp, new_val, ptr);
3526 #else
3527 __ strex(Rtemp, new_val, addr);
3528 #endif // AARCH64
3529
3530 #ifdef AARCH64
3531 } else if ((data->type() == T_LONG) || (data->is_oop() && !UseCompressedOops)) {
3532 Register dst = dest->as_pointer_register();
3533 Register new_val = noreg;
3534 __ ldaxr(dst, ptr);
3535 if (code == lir_xadd) {
3536 Register tmp_reg = tmp->as_pointer_register();
3537 if (data->is_constant()) {
3538 assert_different_registers(dst, ptr, tmp_reg);
3539 jlong c = data->as_constant_ptr()->as_jlong();
3540 assert((jlong)((jint)c) == c, "overflow");
3541 __ add(tmp_reg, dst, (jint)c);
3542 } else {
3543 assert_different_registers(dst, ptr, tmp_reg, data->as_pointer_register());
3544 __ add(tmp_reg, dst, data->as_pointer_register());
3545 }
3546 new_val = tmp_reg;
3547 } else {
3548 new_val = data->as_pointer_register();
3549 assert_different_registers(dst, ptr, new_val);
3550 }
3551 __ stlxr(Rtemp, new_val, ptr);
3552 #else
3553 } else if (data->type() == T_LONG) {
3554 Register dst_lo = dest->as_register_lo();
3555 Register new_val_lo = noreg;
3556 Register dst_hi = dest->as_register_hi();
3557
3558 assert(dst_hi->encoding() == dst_lo->encoding() + 1, "non aligned register pair");
3559 assert((dst_lo->encoding() & 0x1) == 0, "misaligned register pair");
3560
3562 __ ldrexd(dst_lo, addr);
3563 if (code == lir_xadd) {
3564 Register tmp_lo = tmp->as_register_lo();
3565 Register tmp_hi = tmp->as_register_hi();
3566
3567 assert(tmp_hi->encoding() == tmp_lo->encoding() + 1, "non aligned register pair");
3568 assert((tmp_lo->encoding() & 0x1) == 0, "misaligned register pair");
3569
3570 if (data->is_constant()) {
3571 jlong c = data->as_constant_ptr()->as_jlong();
3572 assert((jlong)((jint)c) == c, "overflow");
3573 assert_different_registers(dst_lo, dst_hi, tmp_lo, tmp_hi);
3574 __ adds(tmp_lo, dst_lo, (jint)c);
3575 __ adc(tmp_hi, dst_hi, 0);
3576 } else {
3577 Register new_val_lo = data->as_register_lo();
3578 Register new_val_hi = data->as_register_hi();
3579 __ adds(tmp_lo, dst_lo, new_val_lo);
3580 __ adc(tmp_hi, dst_hi, new_val_hi);
3581 assert_different_registers(dst_lo, dst_hi, tmp_lo, tmp_hi, new_val_lo, new_val_hi);
3582 }
3583 new_val_lo = tmp_lo;
3584 } else {
3585 new_val_lo = data->as_register_lo();
3586 Register new_val_hi = data->as_register_hi();
3587
3588 assert_different_registers(dst_lo, dst_hi, new_val_lo, new_val_hi);
3589 assert(new_val_hi->encoding() == new_val_lo->encoding() + 1, "non aligned register pair");
3590 assert((new_val_lo->encoding() & 0x1) == 0, "misaligned register pair");
3591 }
3592 __ strexd(Rtemp, new_val_lo, addr);
3593 #endif // AARCH64
3594 } else {
3595 ShouldNotReachHere();
3596 }
3597
3598 __ cbnz_32(Rtemp, retry);
3599 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp);
3600
3601 #ifdef AARCH64
3602 if (UseCompressedOops && data->is_oop()) {
3603 __ decode_heap_oop(dest->as_register());
3604 }
3605 #endif // AARCH64
3606 }
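// A minimal sketch of what the 32-bit lir_xadd path above expands to for an
// int value (register names illustrative):
//   retry:
//     ldrex  Rdst, [Raddr]        ; load-exclusive old value
//     add    Rtmp, Rdst, Rdelta   ; compute new value
//     strex  Rtemp, Rtmp, [Raddr] ; Rtemp = 0 iff the store succeeded
//     cmp    Rtemp, #0
//     bne    retry
// followed by the trailing StoreLoad|StoreStore barrier above.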
3607
3608 #undef __
277
278 // Emit the slow path assembly
279 if (stub != NULL) {
280 stub->emit_code(this);
281 }
282
283 return offset;
284 }
285
286
287 int LIR_Assembler::emit_deopt_handler() {
288 address handler_base = __ start_a_stub(deopt_handler_size());
289 if (handler_base == NULL) {
290 bailout("deopt handler overflow");
291 return -1;
292 }
293
294 int offset = code_offset();
295
296 __ mov_relative_address(LR, __ pc());
297 __ push(LR); // stub expects LR to be saved
298 __ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, noreg);
299
300 assert(code_offset() - offset <= deopt_handler_size(), "overflow");
301 __ end_a_stub();
302
303 return offset;
304 }
305
306
307 void LIR_Assembler::return_op(LIR_Opr result) {
308 // Pop the frame before safepoint polling
309 __ remove_frame(initial_frame_size_in_bytes());
310
311 // mov_slow here is usually one or two instructions
312 __ mov_address(Rtemp, os::get_polling_page(), symbolic_Relocation::polling_page_reference);
313 __ relocate(relocInfo::poll_return_type);
314 __ ldr(Rtemp, Address(Rtemp));
315 __ ret();
316 }
317
318
319 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
320 __ mov_address(Rtemp, os::get_polling_page(), symbolic_Relocation::polling_page_reference);
321 if (info != NULL) {
322 add_debug_info_for_branch(info);
323 }
324 int offset = __ offset();
325 __ relocate(relocInfo::poll_type);
326 __ ldr(Rtemp, Address(Rtemp));
327 return offset;
328 }
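// The ldr from the polling page is the safepoint check itself: when a
// safepoint is pending the VM protects the page, the load faults, and the
// signal handler uses the recorded offset to locate the debug info.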
329
330
331 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
332 if (from_reg != to_reg) {
333 __ mov(to_reg, from_reg);
334 }
335 }
336
337 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
338 assert(src->is_constant() && dest->is_register(), "must be");
339 LIR_Const* c = src->as_constant_ptr();
340
341 switch (c->type()) {
342 case T_ADDRESS:
343 case T_INT:
344 assert(patch_code == lir_patch_none, "no patching handled here");
345 __ mov_slow(dest->as_register(), c->as_jint());
346 break;
347
348 case T_LONG:
349 assert(patch_code == lir_patch_none, "no patching handled here");
350 __ mov_slow(dest->as_register_lo(), c->as_jint_lo());
351 __ mov_slow(dest->as_register_hi(), c->as_jint_hi());
352 break;
353
354 case T_OBJECT:
355 if (patch_code == lir_patch_none) {
356 __ mov_oop(dest->as_register(), c->as_jobject());
357 } else {
358 jobject2reg_with_patching(dest->as_register(), info);
359 }
360 break;
361
362 case T_METADATA:
363 if (patch_code == lir_patch_none) {
364 __ mov_metadata(dest->as_register(), c->as_metadata());
365 } else {
366 klass2reg_with_patching(dest->as_register(), info);
367 }
368 break;
369
370 case T_FLOAT:
371 if (dest->is_single_fpu()) {
372 __ mov_float(dest->as_float_reg(), c->as_jfloat());
373 } else {
374 // Simple getters can return float constant directly into r0
375 __ mov_slow(dest->as_register(), c->as_jint_bits());
376 }
377 break;
378
379 case T_DOUBLE:
380 if (dest->is_double_fpu()) {
381 __ mov_double(dest->as_double_reg(), c->as_jdouble());
382 } else {
383 // Simple getters can return double constant directly into r1r0
384 __ mov_slow(dest->as_register_lo(), c->as_jint_lo_bits());
385 __ mov_slow(dest->as_register_hi(), c->as_jint_hi_bits());
386 }
387 break;
388
389 default:
390 ShouldNotReachHere();
391 }
392 }
393
394 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
395 assert(src->is_constant(), "must be");
396 assert(dest->is_stack(), "must be");
397 LIR_Const* c = src->as_constant_ptr();
398
399 switch (c->type()) {
400 case T_INT: // fall through
401 case T_FLOAT:
402 __ mov_slow(Rtemp, c->as_jint_bits());
403 __ str_32(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
404 break;
405
406 case T_ADDRESS:
407 __ mov_slow(Rtemp, c->as_jint());
408 __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
409 break;
410
411 case T_OBJECT:
412 __ mov_oop(Rtemp, c->as_jobject());
413 __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
414 break;
415
416 case T_LONG: // fall through
417 case T_DOUBLE:
418 __ mov_slow(Rtemp, c->as_jint_lo_bits());
419 __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes));
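// Rtemp still holds the low word here; it is reloaded only when the halves
// differ, so constants such as 0 or -1 need a single mov_slow.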
420 if (c->as_jint_hi_bits() != c->as_jint_lo_bits()) {
421 __ mov_slow(Rtemp, c->as_jint_hi_bits());
422 }
423 __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
424 break;
425
426 default:
427 ShouldNotReachHere();
428 }
429 }
430
431 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
432 CodeEmitInfo* info, bool wide) {
433 assert((src->as_constant_ptr()->type() == T_OBJECT && src->as_constant_ptr()->as_jobject() == NULL), "cannot handle otherwise");
434 __ mov(Rtemp, 0);
435
436 int null_check_offset = code_offset();
437 __ str(Rtemp, as_Address(dest->as_address_ptr()));
438
439 if (info != NULL) {
440 assert(false, "arm32 didn't support this before, investigate if bug");
441 add_debug_info_for_null_check(null_check_offset, info);
442 }
443 }
444
445 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
446 assert(src->is_register() && dest->is_register(), "must be");
447
448 if (src->is_single_cpu()) {
449 if (dest->is_single_cpu()) {
450 move_regs(src->as_register(), dest->as_register());
451 } else if (dest->is_single_fpu()) {
452 __ fmsr(dest->as_float_reg(), src->as_register());
453 } else {
454 ShouldNotReachHere();
455 }
456 } else if (src->is_double_cpu()) {
457 if (dest->is_double_cpu()) {
458 __ long_move(dest->as_register_lo(), dest->as_register_hi(), src->as_register_lo(), src->as_register_hi());
459 } else {
460 __ fmdrr(dest->as_double_reg(), src->as_register_lo(), src->as_register_hi());
461 }
462 } else if (src->is_single_fpu()) {
463 if (dest->is_single_fpu()) {
464 __ mov_float(dest->as_float_reg(), src->as_float_reg());
465 } else if (dest->is_single_cpu()) {
466 __ mov_fpr2gpr_float(dest->as_register(), src->as_float_reg());
467 } else {
468 ShouldNotReachHere();
469 }
470 } else if (src->is_double_fpu()) {
471 if (dest->is_double_fpu()) {
472 __ mov_double(dest->as_double_reg(), src->as_double_reg());
473 } else if (dest->is_double_cpu()) {
474 __ fmrrd(dest->as_register_lo(), dest->as_register_hi(), src->as_double_reg());
475 } else {
476 ShouldNotReachHere();
477 }
478 } else {
479 ShouldNotReachHere();
480 }
481 }
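// fmsr/fmdrr (and fmrrd) move raw bits between core and VFP registers with
// no conversion, e.g. fmdrr d0, r0, r1 packs the r1:r0 pair into a double
// register.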
482
483 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
484 assert(src->is_register(), "should not call otherwise");
485 assert(dest->is_stack(), "should not call otherwise");
486
487 Address addr = dest->is_single_word() ?
488 frame_map()->address_for_slot(dest->single_stack_ix()) :
489 frame_map()->address_for_slot(dest->double_stack_ix());
490
491 assert(lo_word_offset_in_bytes == 0 && hi_word_offset_in_bytes == 4, "little endian");
492 if (src->is_single_fpu() || src->is_double_fpu()) {
493 if (addr.disp() >= 1024) { BAILOUT("Too exotic case to handle here"); }
494 }
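// VFP load/store instructions encode the displacement as an 8-bit word
// count (at most 255 * 4 = 1020 bytes), so larger frame offsets cannot be
// addressed directly, hence the bailout.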
495
496 if (src->is_single_cpu()) {
497 switch (type) {
498 case T_OBJECT:
499 case T_ARRAY: __ verify_oop(src->as_register()); // fall through
500 case T_ADDRESS:
501 case T_METADATA: __ str(src->as_register(), addr); break;
502 case T_FLOAT: // used in intBitsToFloat intrinsic implementation, fall through
503 case T_INT: __ str_32(src->as_register(), addr); break;
504 default:
505 ShouldNotReachHere();
506 }
507 } else if (src->is_double_cpu()) {
508 __ str(src->as_register_lo(), addr);
509 __ str(src->as_register_hi(), frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
510 } else if (src->is_single_fpu()) {
511 __ str_float(src->as_float_reg(), addr);
512 } else if (src->is_double_fpu()) {
513 __ str_double(src->as_double_reg(), addr);
514 } else {
515 ShouldNotReachHere();
516 }
517 }


void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info,
                            bool pop_fpu_stack, bool wide,
                            bool unaligned) {
  LIR_Address* to_addr = dest->as_address_ptr();
  Register base_reg = to_addr->base()->as_pointer_register();
  const bool needs_patching = (patch_code != lir_patch_none);

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }

  int null_check_offset = code_offset();

  switch (type) {
    case T_ARRAY:
    case T_OBJECT:
      if (UseCompressedOops && !wide) {
        ShouldNotReachHere();
      } else {
        __ str(src->as_register(), as_Address(to_addr));
      }
      break;

    case T_ADDRESS:
      __ str(src->as_pointer_register(), as_Address(to_addr));
      break;

    case T_BYTE:
    case T_BOOLEAN:
      __ strb(src->as_register(), as_Address(to_addr));
      break;

    case T_CHAR:
    case T_SHORT:
      __ strh(src->as_register(), as_Address(to_addr));
      break;

    case T_INT:
#ifdef __SOFTFP__
    case T_FLOAT:
#endif // __SOFTFP__
      __ str_32(src->as_register(), as_Address(to_addr));
      break;


#ifdef __SOFTFP__
    case T_DOUBLE:
#endif // __SOFTFP__
    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
      if (to_addr->index()->is_register()) {
        assert(to_addr->scale() == LIR_Address::times_1, "Unexpected scaled register");
        assert(to_addr->disp() == 0, "Not yet supporting both");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        base_reg = Rtemp;
        __ str(from_lo, Address(Rtemp));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ str(from_hi, Address(Rtemp, BytesPerWord));
      } else if (base_reg == from_lo) {
      // ... (elided) ...
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        if ((to_addr->disp() <= -4096) || (to_addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fsts(src->as_float_reg(), Address(Rtemp, to_addr->disp()));
      } else {
        __ fsts(src->as_float_reg(), as_Address(to_addr));
      }
      break;

    case T_DOUBLE:
      if (to_addr->index()->is_register()) {
        assert(to_addr->scale() == LIR_Address::times_1, "Unexpected scaled register");
        __ add(Rtemp, base_reg, to_addr->index()->as_register());
        if ((to_addr->disp() <= -4096) || (to_addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fstd(src->as_double_reg(), Address(Rtemp, to_addr->disp()));
      } else {
        __ fstd(src->as_double_reg(), as_Address(to_addr));
      }
      break;
#endif // __SOFTFP__


    default:
      ShouldNotReachHere();
  }

  if (info != NULL) {
    add_debug_info_for_null_check(null_check_offset, info);
  }

  if (patch != NULL) {
    // The offset embedded in the LDR/STR instruction may be too small to
    // address the field, so reserve space for one more instruction that
    // can deal with larger offsets.
    __ nop();
    patching_epilog(patch, patch_code, base_reg, info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  Address addr = src->is_single_word() ?
    frame_map()->address_for_slot(src->single_stack_ix()) :
    frame_map()->address_for_slot(src->double_stack_ix());

  assert(lo_word_offset_in_bytes == 0 && hi_word_offset_in_bytes == 4, "little endian");
  if (dest->is_single_fpu() || dest->is_double_fpu()) {
    if (addr.disp() >= 1024) { BAILOUT("Too exotic case to handle here"); }
  }

  if (dest->is_single_cpu()) {
    switch (type) {
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA: __ ldr(dest->as_register(), addr); break;
      case T_FLOAT:    // used in floatToRawIntBits intrinsic implementation
      case T_INT:      __ ldr_u32(dest->as_register(), addr); break;
      default:
        ShouldNotReachHere();
    }
    if ((type == T_OBJECT) || (type == T_ARRAY)) {
      __ verify_oop(dest->as_register());
    }
  } else if (dest->is_double_cpu()) {
    __ ldr(dest->as_register_lo(), addr);
    __ ldr(dest->as_register_hi(), frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes));
  } else if (dest->is_single_fpu()) {
    __ ldr_float(dest->as_float_reg(), addr);
  } else if (dest->is_double_fpu()) {
    __ ldr_double(dest->as_double_reg(), addr);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
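  // ARM has no memory-to-memory move, so each word is bounced through the
  // Rtemp scratch register: one load from the source slot, one store to
  // the destination slot.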
  if (src->is_single_stack()) {
    switch (src->type()) {
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA:
        __ ldr(Rtemp, frame_map()->address_for_slot(src->single_stack_ix()));
        __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
        break;

      case T_INT:
      case T_FLOAT:
        __ ldr_u32(Rtemp, frame_map()->address_for_slot(src->single_stack_ix()));
        __ str_32(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
        break;

      default:
        ShouldNotReachHere();
    }
  } else {
    assert(src->is_double_stack(), "must be");
    __ ldr(Rtemp, frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes));
    __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes));
    __ ldr(Rtemp, frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes));
    __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
  }
}

void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info,
                            bool wide, bool unaligned) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Address* addr = src->as_address_ptr();

  Register base_reg = addr->base()->as_pointer_register();

  PatchingStub* patch = NULL;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }
  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_OBJECT: // fall through
    case T_ARRAY:
      if (UseCompressedOops && !wide) {
        __ ldr_u32(dest->as_register(), as_Address(addr));
      } else {
        __ ldr(dest->as_register(), as_Address(addr));
      }
      break;

    case T_ADDRESS:
      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
        __ ldr_u32(dest->as_pointer_register(), as_Address(addr));
      } else {
        __ ldr(dest->as_pointer_register(), as_Address(addr));
      }
      break;

    case T_INT:
#ifdef __SOFTFP__
    case T_FLOAT:
#endif // __SOFTFP__
      __ ldr(dest->as_pointer_register(), as_Address(addr));
      break;

    case T_BOOLEAN:
      __ ldrb(dest->as_register(), as_Address(addr));
      break;

    case T_BYTE:
      __ ldrsb(dest->as_register(), as_Address(addr));
      break;

    case T_CHAR:
      __ ldrh(dest->as_register(), as_Address(addr));
      break;

    case T_SHORT:
      __ ldrsh(dest->as_register(), as_Address(addr));
      break;


#ifdef __SOFTFP__
    case T_DOUBLE:
#endif // __SOFTFP__
    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
      if (addr->index()->is_register()) {
        assert(addr->scale() == LIR_Address::times_1, "Unexpected scaled register");
        assert(addr->disp() == 0, "Not yet supporting both");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        base_reg = Rtemp;
        __ ldr(to_lo, Address(Rtemp));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_low, base_reg, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ ldr(to_hi, Address(Rtemp, BytesPerWord));
      } else if (base_reg == to_lo) {
      // ... (elided) ...
        __ add(Rtemp, base_reg, addr->index()->as_register());
        if ((addr->disp() <= -4096) || (addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ flds(dest->as_float_reg(), Address(Rtemp, addr->disp()));
      } else {
        __ flds(dest->as_float_reg(), as_Address(addr));
      }
      break;

    case T_DOUBLE:
      if (addr->index()->is_register()) {
        assert(addr->scale() == LIR_Address::times_1, "Unexpected scaled register");
        __ add(Rtemp, base_reg, addr->index()->as_register());
        if ((addr->disp() <= -4096) || (addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
        __ fldd(dest->as_double_reg(), Address(Rtemp, addr->disp()));
      } else {
        __ fldd(dest->as_double_reg(), as_Address(addr));
      }
      break;
#endif // __SOFTFP__


    default:
      ShouldNotReachHere();
  }

  if (patch != NULL) {
    // The offset embedded in the LDR/STR instruction may be too small to
    // address the field, so reserve space for one more instruction that
    // can deal with larger offsets.
    __ nop();
    patching_epilog(patch, patch_code, base_reg, info);
  }

}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  bool is_32 = op->result_opr()->is_single_cpu();

  if (op->code() == lir_idiv && op->in_opr2()->is_constant() && is_32) {
    int c = op->in_opr2()->as_constant_ptr()->as_jint();
    assert(is_power_of_2(c), "non power-of-2 constant should be put in a register");

    Register left = op->in_opr1()->as_register();
    Register dest = op->result_opr()->as_register();
    if (c == 1) {
      __ mov(dest, left);
    } else if (c == 2) {
      __ add_32(dest, left, AsmOperand(left, lsr, 31));
      __ asr_32(dest, dest, 1);
    } else if (c != (int) 0x80000000) {
      int power = log2_intptr(c);
      __ asr_32(Rtemp, left, 31);
      __ add_32(dest, left, AsmOperand(Rtemp, lsr, 32-power)); // dest = left + (left < 0 ? 2^power - 1 : 0);
      __ asr_32(dest, dest, power); // dest = dest >> power;
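      // The bias makes the arithmetic shift round toward zero, as Java
      // division requires. For example, with c == 8 (power == 3) and
      // left == -5: Rtemp == 0xFFFFFFFF, the lsr by 29 yields 7, so
      // dest == -5 + 7 == 2, and 2 >> 3 == 0, matching -5 / 8 in Java.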
    } else {
      // x/0x80000000 is a special case, since the divisor is a power of two, but is negative.
      // The only possible result values are 0 and 1, with 1 only for dividend == divisor == 0x80000000.
      __ cmp_32(left, c);
      __ mov(dest, 0, ne);
      __ mov(dest, 1, eq);
    }
  } else {
    assert(op->code() == lir_idiv || op->code() == lir_irem, "unexpected op3");
    __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::runtime_call_type);
    add_debug_info_for_div0_here(op->info());
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
  assert(op->info() == NULL, "CodeEmitInfo?");
#endif // ASSERT

#ifdef __SOFTFP__
  assert(op->code() != lir_cond_float_branch, "this should be impossible");
#else
  if (op->code() == lir_cond_float_branch) {
    __ fmstat();
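    // fmstat copies the FPSCR condition flags into the CPSR; after a VFP
    // compare, the V flag (condition "vs") is set exactly when the operands
    // were unordered, i.e. at least one of them was NaN.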
    __ b(*(op->ublock()->label()), vs);
  }
#endif // __SOFTFP__

  AsmCondition acond = al;
  switch (op->cond()) {
    case lir_cond_equal:        acond = eq; break;
    case lir_cond_notEqual:     acond = ne; break;
    case lir_cond_less:         acond = lt; break;
    case lir_cond_lessEqual:    acond = le; break;
    case lir_cond_greaterEqual: acond = ge; break;
    case lir_cond_greater:      acond = gt; break;
    case lir_cond_aboveEqual:   acond = hs; break;
    case lir_cond_belowEqual:   acond = ls; break;
    default: assert(op->cond() == lir_cond_always, "must be");
  }
  __ b(*(op->label()), acond);
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
      move_regs(src->as_register(), dest->as_register_lo());
      __ mov(dest->as_register_hi(), AsmOperand(src->as_register(), asr, 31));
      break;
    case Bytecodes::_l2i:
      move_regs(src->as_register_lo(), dest->as_register());
      break;
    case Bytecodes::_i2b:
      __ sign_extend(dest->as_register(), src->as_register(), 8);
      break;
    case Bytecodes::_i2s:
      __ sign_extend(dest->as_register(), src->as_register(), 16);
      break;
    case Bytecodes::_i2c:
      __ zero_extend(dest->as_register(), src->as_register(), 16);
      break;
    case Bytecodes::_f2d:
      __ convert_f2d(dest->as_double_reg(), src->as_float_reg());
      break;
    case Bytecodes::_d2f:
      __ convert_d2f(dest->as_float_reg(), src->as_double_reg());
      break;
    case Bytecodes::_i2f:
      __ fmsr(Stemp, src->as_register());
      __ fsitos(dest->as_float_reg(), Stemp);
      break;
    case Bytecodes::_i2d:
      __ fmsr(Stemp, src->as_register());
      __ fsitod(dest->as_double_reg(), Stemp);
      break;
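    // f2i/d2i must truncate toward zero; the "z" variants of the VFP
    // conversions (ftosizs/ftosizd) always use round-toward-zero,
    // independent of the FPSCR rounding mode, which matches Java semantics.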
    case Bytecodes::_f2i:
      __ ftosizs(Stemp, src->as_float_reg());
      __ fmrs(dest->as_register(), Stemp);
      break;
    case Bytecodes::_d2i:
      __ ftosizd(Stemp, src->as_double_reg());
      __ fmrs(dest->as_register(), Stemp);
      break;
    default:
      ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    Register tmp = op->tmp1()->as_register();
    __ ldrb(tmp, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
    add_debug_info_for_null_check_here(op->stub()->info());
    __ cmp(tmp, InstanceKlass::fully_initialized);
    __ b(*op->stub()->entry(), ne);
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->tmp3()->as_register(),
                     op->header_size(),
                     op->object_size(),
  // ... (elided) ...
  md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  data = md->bci_to_data(bci);
  assert(data != NULL, "need data for checkcast");
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  if (md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes() >= 4096) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ldr can use an immediate offset to reference the slots of the data
    mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
  }
}

// On 32-bit ARM, code before this helper should test obj for null (ZF should be set if obj is null).
void LIR_Assembler::typecheck_profile_helper1(ciMethod* method, int bci,
                                              ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias,
                                              Register obj, Register mdo, Register data_val, Label* obj_is_null) {
  assert(method != NULL, "Should have method");
  assert_different_registers(obj, mdo, data_val);
  setup_md_access(method, bci, md, data, mdo_offset_bias);
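  // The caller's null test left the flags set: on the null path (EQ) the
  // null_seen flag is recorded in the method data below before branching
  // out; otherwise control falls through with a non-null obj.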
  Label not_null;
  __ b(not_null, ne);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(data_val, mdo_offset_bias);
    __ add(mdo, mdo, data_val);
  }
  Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
  __ ldrb(data_val, flags_addr);
  __ orr(data_val, data_val, (uint)BitData::null_seen_byte_constant());
  __ strb(data_val, flags_addr);
  __ b(*obj_is_null);
  __ bind(not_null);
}

void LIR_Assembler::typecheck_profile_helper2(ciMethodData* md, ciProfileData* data, int mdo_offset_bias,
                                              Register mdo, Register recv, Register value, Register tmp1,
                                              Label* profile_cast_success, Label* profile_cast_failure,
                                              Label* success, Label* failure) {
  assert_different_registers(mdo, value, tmp1);
  __ bind(*profile_cast_success);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(tmp1, mdo_offset_bias);
    __ add(mdo, mdo, tmp1);
  }
  __ load_klass(recv, value);
  type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
  __ b(*success);
  // Cast failure case
  __ bind(*profile_cast_failure);
  __ mov_metadata(mdo, md->constant_encoding());
  if (mdo_offset_bias > 0) {
    __ mov_slow(tmp1, mdo_offset_bias);
    __ add(mdo, mdo, tmp1);
  }
  Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
  __ ldr(tmp1, data_addr);
  __ sub(tmp1, tmp1, DataLayout::counter_increment);
  __ str(tmp1, data_addr);
  __ b(*failure);
}

// Sets `res` to true if `cond` holds.
static void set_instanceof_result(MacroAssembler* _masm, Register res, AsmCondition cond) {
  __ mov(res, 1, cond);
}


void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  // TODO: ARM - can be more effective with one more register
  switch (op->code()) {
    case lir_store_check: {
      CodeStub* stub = op->stub();
      Register value = op->object()->as_register();
      Register array = op->array()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      assert_different_registers(klass_RInfo, k_RInfo, Rtemp);
      if (op->should_profile()) {
        assert_different_registers(value, klass_RInfo, k_RInfo, Rtemp);
      }

      // check if it needs to be profiled
      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;
      Label profile_cast_success, profile_cast_failure, done;
      Label *success_target = op->should_profile() ? &profile_cast_success : &done;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

      if (op->should_profile()) {
        __ cmp(value, 0);
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, value, k_RInfo, Rtemp, &done);
      } else {
        __ cbz(value, done);
      }
      assert_different_registers(k_RInfo, value);
      add_debug_info_for_null_check_here(op->info_for_exception());
      __ load_klass(k_RInfo, array);
      __ load_klass(klass_RInfo, value);
      __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
      __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
      // check for immediate positive hit
      __ ldr(Rtemp, Address(klass_RInfo, Rtemp));
      __ cmp(klass_RInfo, k_RInfo);
      __ cond_cmp(Rtemp, k_RInfo, ne);
      __ b(*success_target, eq);
      // check for immediate negative hit
      __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
      __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
      __ b(*failure_target, ne);
      // slow case
      // ... (elided) ...
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      ciKlass* k = op->klass();
      assert_different_registers(res, k_RInfo, klass_RInfo, Rtemp);

      if (stub->is_simple_exception_stub()) {
        // TODO: ARM - Late binding is used to prevent confusion of register allocator
        assert(stub->is_exception_throw_stub(), "must be");
        ((SimpleExceptionStub*)stub)->set_obj(op->result_opr());
      }
      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;

      Label done;

      Label profile_cast_failure, profile_cast_success;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : op->stub()->entry();
      Label *success_target = op->should_profile() ? &profile_cast_success : &done;


      __ movs(res, obj);
      if (op->should_profile()) {
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
      } else {
        __ b(done, eq);
      }
      if (k->is_loaded()) {
        __ mov_metadata(k_RInfo, k->constant_encoding());
      } else if (k_RInfo != obj) {
        klass2reg_with_patching(k_RInfo, op->info_for_patch());
        __ movs(res, obj);
      } else {
        // Patching doesn't update "res" register after GC, so do patching first
        klass2reg_with_patching(Rtemp, op->info_for_patch());
        __ movs(res, obj);
        __ mov(k_RInfo, Rtemp);
      }
      __ load_klass(klass_RInfo, res, ne);

      // ... (elided) ...
          __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
          __ cbz(R0, *failure_target);
        }
      } else {
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        __ b(*success_target, eq);
        // check for immediate positive hit
        __ ldr(Rtemp, Address(klass_RInfo, Rtemp));
        __ cmp(klass_RInfo, k_RInfo);
        __ cmp(Rtemp, k_RInfo, ne);
        __ b(*success_target, eq);
        // check for immediate negative hit
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
        __ b(*failure_target, ne);
        // slow case
        assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
        __ cbz(R0, *failure_target);
      }

      if (op->should_profile()) {
        Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, res, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done, stub->entry());
      }
      __ bind(done);
      break;
    }

    case lir_instanceof: {
      Register obj = op->object()->as_register();
      Register res = op->result_opr()->as_register();
      Register klass_RInfo = op->tmp1()->as_register();
      Register k_RInfo = op->tmp2()->as_register();
      ciKlass* k = op->klass();
      assert_different_registers(res, klass_RInfo, k_RInfo, Rtemp);

      ciMethodData* md;
      ciProfileData* data;
      int mdo_offset_bias = 0;

      Label done;

      Label profile_cast_failure, profile_cast_success;
      Label *failure_target = op->should_profile() ? &profile_cast_failure : &done;
      Label *success_target = op->should_profile() ? &profile_cast_success : &done;

      __ movs(res, obj);

      if (op->should_profile()) {
        typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
      } else {
        __ b(done, eq);
      }

      if (k->is_loaded()) {
        __ mov_metadata(k_RInfo, k->constant_encoding());
      } else {
        op->info_for_patch()->add_register_oop(FrameMap::as_oop_opr(res));
        klass2reg_with_patching(k_RInfo, op->info_for_patch());
      }
      __ load_klass(klass_RInfo, res);

      if (!op->should_profile()) {
        __ mov(res, 0);
      }

      if (op->fast_check()) {
        __ cmp(klass_RInfo, k_RInfo);
        if (!op->should_profile()) {
          set_instanceof_result(_masm, res, eq);
        } else {
          __ b(profile_cast_failure, ne);
        }
      } else if (k->is_loaded()) {
        __ ldr(Rtemp, Address(klass_RInfo, k->super_check_offset()));
        if (in_bytes(Klass::secondary_super_cache_offset()) != (int) k->super_check_offset()) {
          __ cmp(Rtemp, k_RInfo);
          if (!op->should_profile()) {
            set_instanceof_result(_masm, res, eq);
          } else {
            __ b(profile_cast_failure, ne);
          }
        } else {
          __ cmp(klass_RInfo, k_RInfo);
          __ cond_cmp(Rtemp, k_RInfo, ne);
          if (!op->should_profile()) {
            set_instanceof_result(_masm, res, eq);
          }
          __ b(*success_target, eq);
          assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
          __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
          if (!op->should_profile()) {
            move_regs(R0, res);
          } else {
            __ cbz(R0, *failure_target);
          }
        }
      } else {
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        // check for immediate positive hit
        __ cmp(klass_RInfo, k_RInfo);
        if (!op->should_profile()) {
          __ ldr(res, Address(klass_RInfo, Rtemp), ne);
          __ cond_cmp(res, k_RInfo, ne);
          set_instanceof_result(_masm, res, eq);
        } else {
          __ ldr(Rtemp, Address(klass_RInfo, Rtemp), ne);
          __ cond_cmp(Rtemp, k_RInfo, ne);
        }
        __ b(*success_target, eq);
        // check for immediate negative hit
        if (op->should_profile()) {
          __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
        }
        __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
        if (!op->should_profile()) {
          __ mov(res, 0, ne);
        }
        __ b(*failure_target, ne);
        // slow case
        assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
        if (!op->should_profile()) {
          move_regs(R0, res);
        }
        if (op->should_profile()) {
          __ cbz(R0, *failure_target);
        }
      }

      if (op->should_profile()) {
        Label done_ok, done_failure;
        Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
        typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, res, tmp1,
                                  &profile_cast_success, &profile_cast_failure,
                                  &done_ok, &done_failure);
        __ bind(done_failure);
        __ mov(res, 0);
        __ b(done);
        __ bind(done_ok);
        __ mov(res, 1);
      }
      __ bind(done);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  // if (*addr == cmpval) {
  //   *addr = newval;
  //   dest = 1;
  // } else {
  //   dest = 0;
  // }
  // FIXME: membar_release
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
  Register addr = op->addr()->is_register() ?
    op->addr()->as_pointer_register() :
    op->addr()->as_address_ptr()->base()->as_pointer_register();
  assert(op->addr()->is_register() || op->addr()->as_address_ptr()->disp() == 0, "unexpected disp");
  assert(op->addr()->is_register() || op->addr()->as_address_ptr()->index() == LIR_OprDesc::illegalOpr(), "unexpected index");
  if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    Register cmpval = op->cmp_value()->as_register();
    Register newval = op->new_value()->as_register();
    Register dest = op->result_opr()->as_register();
    assert_different_registers(dest, addr, cmpval, newval, Rtemp);

    __ atomic_cas_bool(cmpval, newval, addr, 0, Rtemp); // Rtemp free by default at C1 LIR layer
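    // atomic_cas_bool is expected to leave EQ set exactly when the exchange
    // succeeded, so the boolean result is materialized with the two
    // conditional moves below.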
    __ mov(dest, 1, eq);
    __ mov(dest, 0, ne);
  } else if (op->code() == lir_cas_long) {
    assert(VM_Version::supports_cx8(), "wrong machine");
    Register cmp_value_lo = op->cmp_value()->as_register_lo();
    Register cmp_value_hi = op->cmp_value()->as_register_hi();
    Register new_value_lo = op->new_value()->as_register_lo();
    Register new_value_hi = op->new_value()->as_register_hi();
    Register dest = op->result_opr()->as_register();
    Register tmp_lo = op->tmp1()->as_register_lo();
    Register tmp_hi = op->tmp1()->as_register_hi();

    assert_different_registers(tmp_lo, tmp_hi, cmp_value_lo, cmp_value_hi, dest, new_value_lo, new_value_hi, addr);
    assert(tmp_hi->encoding() == tmp_lo->encoding() + 1, "non aligned register pair");
    assert(new_value_hi->encoding() == new_value_lo->encoding() + 1, "non aligned register pair");
    assert((tmp_lo->encoding() & 0x1) == 0, "misaligned register pair");
    assert((new_value_lo->encoding() & 0x1) == 0, "misaligned register pair");
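    // The register-pair asserts reflect the LDREXD/STREXD requirement that
    // doubleword exclusive accesses use an even/odd consecutive register
    // pair (e.g. R0/R1).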
    __ atomic_cas64(tmp_lo, tmp_hi, dest, cmp_value_lo, cmp_value_hi,
                    new_value_lo, new_value_hi, addr, 0);
  } else {
    Unimplemented();
  }
  // FIXME: is full membar really needed instead of just membar_acquire?
  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp);
}


void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  AsmCondition acond = al;
  AsmCondition ncond = nv;
  if (opr1 != opr2) {
    switch (condition) {
      case lir_cond_equal:        acond = eq; ncond = ne; break;
      case lir_cond_notEqual:     acond = ne; ncond = eq; break;
      case lir_cond_less:         acond = lt; ncond = ge; break;
      case lir_cond_lessEqual:    acond = le; ncond = gt; break;
      case lir_cond_greaterEqual: acond = ge; ncond = lt; break;
      case lir_cond_greater:      acond = gt; ncond = le; break;
      case lir_cond_aboveEqual:   acond = hs; ncond = lo; break;
      case lir_cond_belowEqual:   acond = ls; ncond = hi; break;
      default: ShouldNotReachHere();
    }
  }

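  // Two passes over the same code below: first the opr1 move is emitted
  // predicated on acond, then the condition is negated and the opr2 move
  // is emitted predicated on ncond, so exactly one of the two moves takes
  // effect at run time.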
  for (;;) { // two iterations only
    if (opr1 == result) {
      // do nothing
    } else if (opr1->is_single_cpu()) {
      __ mov(result->as_register(), opr1->as_register(), acond);
    } else if (opr1->is_double_cpu()) {
      __ long_move(result->as_register_lo(), result->as_register_hi(),
                   opr1->as_register_lo(), opr1->as_register_hi(), acond);
    } else if (opr1->is_single_stack()) {
      __ ldr(result->as_register(), frame_map()->address_for_slot(opr1->single_stack_ix()), acond);
    } else if (opr1->is_double_stack()) {
      __ ldr(result->as_register_lo(),
             frame_map()->address_for_slot(opr1->double_stack_ix(), lo_word_offset_in_bytes), acond);
      __ ldr(result->as_register_hi(),
             frame_map()->address_for_slot(opr1->double_stack_ix(), hi_word_offset_in_bytes), acond);
    } else if (opr1->is_illegal()) {
      // do nothing: this part of the cmove has been optimized away in the peephole optimizer
    } else {
      assert(opr1->is_constant(), "must be");
      LIR_Const* c = opr1->as_constant_ptr();
      // ... (elided) ...
          break;
        case T_DOUBLE:
#ifdef __SOFTFP__
          // not generated now.
          __ mov_slow(result->as_register_lo(), c->as_jint_lo(), acond);
          __ mov_slow(result->as_register_hi(), c->as_jint_hi(), acond);
#else
          __ mov_double(result->as_double_reg(), c->as_jdouble(), acond);
#endif // __SOFTFP__
          break;
        default:
          ShouldNotReachHere();
      }
    }

    // Negate the condition and repeat the algorithm with the second operand
    if (opr1 == opr2) { break; }
    opr1 = opr2;
    acond = ncond;
  }
}

#if defined(ASSERT)
static int reg_size(LIR_Opr op) {
  switch (op->type()) {
    case T_FLOAT:
    case T_INT:      return BytesPerInt;
    case T_LONG:
    case T_DOUBLE:   return BytesPerLong;
    case T_OBJECT:
    case T_ARRAY:
    case T_METADATA: return BytesPerWord;
    case T_ADDRESS:
    case T_ILLEGAL:  // fall through
    default: ShouldNotReachHere(); return -1;
  }
}
#endif

void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "unused on this code path");
  assert(dest->is_register(), "wrong items state");

  if (right->is_address()) {
    // special case for adding shifted/extended register
    const Register res = dest->as_pointer_register();
    const Register lreg = left->as_pointer_register();
    const LIR_Address* addr = right->as_address_ptr();

    assert(addr->base()->as_pointer_register() == lreg && addr->index()->is_register() && addr->disp() == 0, "must be");

    int scale = addr->scale();
    AsmShift shift = lsl;


    assert(reg_size(addr->base()) == reg_size(addr->index()), "should be");
    assert(reg_size(addr->base()) == reg_size(dest), "should be");
    assert(reg_size(dest) == wordSize, "should be");

    AsmOperand operand(addr->index()->as_pointer_register(), shift, scale);
    switch (code) {
      case lir_add: __ add(res, lreg, operand); break;
      case lir_sub: __ sub(res, lreg, operand); break;
      default: ShouldNotReachHere();
    }

  } else if (left->is_address()) {
    assert(code == lir_sub && right->is_single_cpu(), "special case used by strength_reduce_multiply()");
    const LIR_Address* addr = left->as_address_ptr();
    const Register res = dest->as_register();
    const Register rreg = right->as_register();
    assert(addr->base()->as_register() == rreg && addr->index()->is_register() && addr->disp() == 0, "must be");
    __ rsb(res, rreg, AsmOperand(addr->index()->as_register(), lsl, addr->scale()));

  } else if (dest->is_single_cpu()) {
    assert(left->is_single_cpu(), "unexpected left operand");

    const Register res = dest->as_register();
    const Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      const Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ add_32(res, lreg, rreg); break;
        case lir_sub: __ sub_32(res, lreg, rreg); break;
        case lir_mul: __ mul_32(res, lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert(right->is_constant(), "must be");
      const jint c = right->as_constant_ptr()->as_jint();
      if (!Assembler::is_arith_imm_in_range(c)) {
        BAILOUT("illegal arithmetic operand");
      }
      switch (code) {
        case lir_add: __ add_32(res, lreg, c); break;
        case lir_sub: __ sub_32(res, lreg, c); break;
        default: ShouldNotReachHere();
      }
    }

  } else if (dest->is_double_cpu()) {
    Register res_lo = dest->as_register_lo();
    Register res_hi = dest->as_register_hi();
    Register lreg_lo = left->as_register_lo();
    Register lreg_hi = left->as_register_hi();
    if (right->is_double_cpu()) {
      Register rreg_lo = right->as_register_lo();
      Register rreg_hi = right->as_register_hi();
      if (res_lo == lreg_hi || res_lo == rreg_hi) {
        res_lo = Rtemp;
      }
      switch (code) {
        case lir_add:
          __ adds(res_lo, lreg_lo, rreg_lo);
          __ adc(res_hi, lreg_hi, rreg_hi);
          break;
        case lir_sub:
          __ subs(res_lo, lreg_lo, rreg_lo);
          __ sbc(res_hi, lreg_hi, rreg_hi);
          break;
        default:
          ShouldNotReachHere();
      }
    } else {
      assert(right->is_constant(), "must be");
      assert((right->as_constant_ptr()->as_jlong() >> 32) == 0, "out of range");
      const jint c = (jint) right->as_constant_ptr()->as_jlong();
      if (res_lo == lreg_hi) {
        res_lo = Rtemp;
      }
      switch (code) {
        case lir_add:
          __ adds(res_lo, lreg_lo, c);
          __ adc(res_hi, lreg_hi, 0);
          break;
        case lir_sub:
          __ subs(res_lo, lreg_lo, c);
          __ sbc(res_hi, lreg_hi, 0);
          break;
        default:
          ShouldNotReachHere();
      }
    }
    move_regs(res_lo, dest->as_register_lo());

  } else if (dest->is_single_fpu()) {
    assert(left->is_single_fpu(), "must be");
    assert(right->is_single_fpu(), "must be");
    const FloatRegister res = dest->as_float_reg();
    const FloatRegister lreg = left->as_float_reg();
    const FloatRegister rreg = right->as_float_reg();
    switch (code) {
      case lir_add: __ add_float(res, lreg, rreg); break;
      case lir_sub: __ sub_float(res, lreg, rreg); break;
      case lir_mul_strictfp: // fall through
      case lir_mul: __ mul_float(res, lreg, rreg); break;
      case lir_div_strictfp: // fall through
      case lir_div: __ div_float(res, lreg, rreg); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_fpu()) {
    assert(left->is_double_fpu(), "must be");
    assert(right->is_double_fpu(), "must be");
    const FloatRegister res = dest->as_double_reg();
    // ... (elided) ...

void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
  switch (code) {
    case lir_abs:
      __ abs_double(dest->as_double_reg(), value->as_double_reg());
      break;
    case lir_sqrt:
      __ sqrt_double(dest->as_double_reg(), value->as_double_reg());
      break;
    default:
      ShouldNotReachHere();
  }
}


void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
  assert(dest->is_register(), "wrong items state");
  assert(left->is_register(), "wrong items state");

  if (dest->is_single_cpu()) {

    const Register res = dest->as_register();
    const Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      const Register rreg = right->as_register();
      switch (code) {
        case lir_logic_and: __ and_32(res, lreg, rreg); break;
        case lir_logic_or:  __ orr_32(res, lreg, rreg); break;
        case lir_logic_xor: __ eor_32(res, lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert(right->is_constant(), "must be");
      const uint c = (uint)right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_logic_and: __ and_32(res, lreg, c); break;
        case lir_logic_or:  __ orr_32(res, lreg, c); break;
        case lir_logic_xor: __ eor_32(res, lreg, c); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    assert(dest->is_double_cpu(), "should be");
    Register res_lo = dest->as_register_lo();

    assert(dest->type() == T_LONG, "unexpected result type");
    assert(left->type() == T_LONG, "unexpected left type");
    assert(right->type() == T_LONG, "unexpected right type");

    const Register res_hi = dest->as_register_hi();
    const Register lreg_lo = left->as_register_lo();
    const Register lreg_hi = left->as_register_hi();

    if (right->is_register()) {
      const Register rreg_lo = right->as_register_lo();
      const Register rreg_hi = right->as_register_hi();
      if (res_lo == lreg_hi || res_lo == rreg_hi) {
        res_lo = Rtemp; // Temp register helps to avoid overlap between result and input
      }
      switch (code) {
        case lir_logic_and:
          __ andr(res_lo, lreg_lo, rreg_lo);
          __ andr(res_hi, lreg_hi, rreg_hi);
          break;
        case lir_logic_or:
          __ orr(res_lo, lreg_lo, rreg_lo);
          __ orr(res_hi, lreg_hi, rreg_hi);
          break;
        case lir_logic_xor:
          __ eor(res_lo, lreg_lo, rreg_lo);
          __ eor(res_hi, lreg_hi, rreg_hi);
          break;
        default:
          ShouldNotReachHere();
      }
      move_regs(res_lo, dest->as_register_lo());
    } else {
      assert(right->is_constant(), "must be");
      const jint c_lo = (jint) right->as_constant_ptr()->as_jlong();
      const jint c_hi = (jint) (right->as_constant_ptr()->as_jlong() >> 32);
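      // An A32 data-processing immediate is an 8-bit value rotated right by
      // an even amount; is_rotated_imm checks that encoding, and only
      // constants expressible this way are inlined below.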
      // Case for logic_or from do_ClassIDIntrinsic()
      if (c_hi == 0 && AsmOperand::is_rotated_imm(c_lo)) {
        switch (code) {
          case lir_logic_and:
            __ andr(res_lo, lreg_lo, c_lo);
            __ mov(res_hi, 0);
            break;
          case lir_logic_or:
            __ orr(res_lo, lreg_lo, c_lo);
            break;
          case lir_logic_xor:
            __ eor(res_lo, lreg_lo, c_lo);
            break;
          default:
            ShouldNotReachHere();
        }
      } else if (code == lir_logic_and &&
                 c_hi == -1 &&
                 (AsmOperand::is_rotated_imm(c_lo) ||
                  AsmOperand::is_rotated_imm(~c_lo))) {
        // Another case which handles logic_and from do_ClassIDIntrinsic()
        if (AsmOperand::is_rotated_imm(c_lo)) {
          __ andr(res_lo, lreg_lo, c_lo);
        } else {
          __ bic(res_lo, lreg_lo, ~c_lo);
        }
        if (res_hi != lreg_hi) {
          __ mov(res_hi, lreg_hi);
        }
      } else {
        BAILOUT("64 bit constant cannot be inlined");
      }
    }
  }
}



void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_single_cpu()) {
    if (opr2->is_constant()) {
      switch (opr2->as_constant_ptr()->type()) {
        case T_INT: {
          const jint c = opr2->as_constant_ptr()->as_jint();
          if (Assembler::is_arith_imm_in_range(c)) {
            __ cmp_32(opr1->as_register(), c);
          } else if (Assembler::is_arith_imm_in_range(-c)) {
            __ cmn_32(opr1->as_register(), -c);
          } else {
            // This can happen when compiling lookupswitch
            __ mov_slow(Rtemp, c);
            __ cmp_32(opr1->as_register(), Rtemp);
          }
          break;
        }
        case T_OBJECT:
          assert(opr2->as_constant_ptr()->as_jobject() == NULL, "cannot handle otherwise");
          __ cmp(opr1->as_register(), 0);
          break;
        default:
          ShouldNotReachHere();
      }
    } else if (opr2->is_single_cpu()) {
      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY || opr1->type() == T_METADATA || opr1->type() == T_ADDRESS) {
        assert(opr2->type() == T_OBJECT || opr2->type() == T_ARRAY || opr2->type() == T_METADATA || opr2->type() == T_ADDRESS, "incompatible type");
        __ cmp(opr1->as_register(), opr2->as_register());
      } else {
        assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY && opr2->type() != T_METADATA && opr2->type() != T_ADDRESS, "incompatible type");
        __ cmp_32(opr1->as_register(), opr2->as_register());
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_double_cpu()) {
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_constant() && opr2->as_jlong() == 0) {
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "cannot handle otherwise");
      __ orrs(Rtemp, xlo, xhi);
    } else if (opr2->is_register()) {
      Register ylo = opr2->as_register_lo();
      Register yhi = opr2->as_register_hi();
      if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
        __ teq(xhi, yhi);
        __ teq(xlo, ylo, eq);
      } else {
        __ subs(xlo, xlo, ylo);
        __ sbcs(xhi, xhi, yhi);
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_single_fpu()) {
    if (opr2->is_constant()) {
      assert(opr2->as_jfloat() == 0.0f, "cannot handle otherwise");
      __ cmp_zero_float(opr1->as_float_reg());
    } else {
      __ cmp_float(opr1->as_float_reg(), opr2->as_float_reg());
    }
  } else if (opr1->is_double_fpu()) {
    if (opr2->is_constant()) {
      assert(opr2->as_jdouble() == 0.0, "cannot handle otherwise");
      __ cmp_zero_double(opr1->as_double_reg());
    } else {
      __ cmp_double(opr1->as_double_reg(), opr2->as_double_reg());
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  const Register res = dst->as_register();
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    comp_op(lir_cond_unknown, left, right, op);
    __ fmstat();
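    // After fmstat: "lt" is true for less-than or unordered, "cs" for
    // greater, equal or unordered, and "cc" for less-than only; this is
    // how the two NaN conventions are separated below.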
    if (code == lir_ucmp_fd2i) { // unordered is less
      __ mvn(res, 0, lt);
      __ mov(res, 1, ge);
    } else {                     // unordered is greater
      __ mov(res, 1, cs);
      __ mvn(res, 0, cc);
    }
    __ mov(res, 0, eq);

  } else {
    assert(code == lir_cmp_l2i, "must be");

    Label done;
    const Register xlo = left->as_register_lo();
    const Register xhi = left->as_register_hi();
    const Register ylo = right->as_register_lo();
    const Register yhi = right->as_register_hi();
    __ cmp(xhi, yhi);
    __ mov(res, 1, gt);
    __ mvn(res, 0, lt);
    __ b(done, ne);
    __ subs(res, xlo, ylo);
    __ mov(res, 1, hi);
    __ mvn(res, 0, lo);
    __ bind(done);
  }
}


void LIR_Assembler::align_call(LIR_Code code) {
  // Not needed
}


void LIR_Assembler::call(LIR_OpJavaCall *op, relocInfo::relocType rtype) {
  int ret_addr_offset = __ patchable_call(op->addr(), rtype);
  assert(ret_addr_offset == __ offset(), "embedded return address not allowed");
  add_call_info_here(op->info());
}


void LIR_Assembler::ic_call(LIR_OpJavaCall *op) {
  bool near_range = __ cache_fully_reachable();
  address oop_address = pc();

  bool use_movw = VM_Version::supports_movw();

  // Ricklass may contain something that is not a metadata pointer, so
  // mov_metadata can't be used.
  InlinedAddress value((address)Universe::non_oop_word());
  InlinedAddress addr(op->addr());
  if (use_movw) {
    __ movw(Ricklass, ((unsigned int)Universe::non_oop_word()) & 0xffff);
    __ movt(Ricklass, ((unsigned int)Universe::non_oop_word()) >> 16);
  } else {
    // No movw/movt: load a PC-relative value instead. There is no
    // relocation here, so no metadata table to load from.
    // Use a b instruction rather than a bl, inline the constant after the
    // branch, use a PC-relative ldr to load the constant, and arrange for
    // the call to return after the constant(s).
    __ ldr_literal(Ricklass, value);
  }
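  // The relocation recorded here is what lets the inline-cache machinery
  // find and patch the value loaded into Ricklass above.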
  __ relocate(virtual_call_Relocation::spec(oop_address));
  if (near_range && use_movw) {
    __ bl(op->addr());
  } else {
    Label call_return;
    __ adr(LR, call_return);
    if (near_range) {
      __ b(op->addr());
    } else {
      __ indirect_jump(addr, Rtemp);
      __ bind_literal(addr);
    }
  // ... (elided) ...
  assert(exceptionOop->as_register() == Rexception_obj, "must match");
  assert(exceptionPC->as_register() == Rexception_pc, "must match");
  info->add_register_oop(exceptionOop);

  Runtime1::StubID handle_id = compilation()->has_fpu_code() ?
    Runtime1::handle_exception_id :
    Runtime1::handle_exception_nofpu_id;
  Label return_address;
  __ adr(Rexception_pc, return_address);
  __ call(Runtime1::entry_for(handle_id), relocInfo::runtime_call_type);
  __ bind(return_address);
  add_call_info_here(info); // for exception handler
}

void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == Rexception_obj, "must match");
  __ b(_unwind_handler_entry);
}

void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  AsmShift shift = lsl;
  switch (code) {
    case lir_shl:  shift = lsl; break;
    case lir_shr:  shift = asr; break;
    case lir_ushr: shift = lsr; break;
    default: ShouldNotReachHere();
  }

  if (dest->is_single_cpu()) {
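    // Java masks shift distances to the low 5 bits for int shifts (and the
    // low 6 bits for long shifts below), so the count register is masked
    // before use.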
    __ andr(Rtemp, count->as_register(), 31);
    __ mov(dest->as_register(), AsmOperand(left->as_register(), shift, Rtemp));
  } else if (dest->is_double_cpu()) {
    Register dest_lo = dest->as_register_lo();
    Register dest_hi = dest->as_register_hi();
    Register src_lo = left->as_register_lo();
    Register src_hi = left->as_register_hi();
    Register Rcount = count->as_register();
    // Resolve possible register conflicts
    if (shift == lsl && dest_hi == src_lo) {
      dest_hi = Rtemp;
    } else if (shift != lsl && dest_lo == src_hi) {
      dest_lo = Rtemp;
    } else if (dest_lo == src_lo && dest_hi == src_hi) {
      dest_lo = Rtemp;
    } else if (dest_lo == Rcount || dest_hi == Rcount) {
      Rcount = Rtemp;
    }
    __ andr(Rcount, count->as_register(), 63);
    __ long_shift(dest_lo, dest_hi, src_lo, src_hi, shift, Rcount);
    move_regs(dest_lo, dest->as_register_lo());
    move_regs(dest_hi, dest->as_register_hi());
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  AsmShift shift = lsl;
  switch (code) {
    case lir_shl:  shift = lsl; break;
    case lir_shr:  shift = asr; break;
    case lir_ushr: shift = lsr; break;
    default: ShouldNotReachHere();
  }

  if (dest->is_single_cpu()) {
    count &= 31;
    if (count != 0) {
      __ mov(dest->as_register(), AsmOperand(left->as_register(), shift, count));
    } else {
      move_regs(left->as_register(), dest->as_register());
    }
  } else if (dest->is_double_cpu()) {
    count &= 63;
    if (count != 0) {
      Register dest_lo = dest->as_register_lo();
      Register dest_hi = dest->as_register_hi();
      Register src_lo = left->as_register_lo();
      Register src_hi = left->as_register_hi();
      // Resolve possible register conflicts
      if (shift == lsl && dest_hi == src_lo) {
        dest_hi = Rtemp;
      } else if (shift != lsl && dest_lo == src_hi) {
        dest_lo = Rtemp;
      }
      __ long_shift(dest_lo, dest_hi, src_lo, src_hi, shift, count);
      move_regs(dest_lo, dest->as_register_lo());
      move_regs(dest_hi, dest->as_register_hi());
    } else {
      __ long_move(dest->as_register_lo(), dest->as_register_hi(),
                   left->as_register_lo(), left->as_register_hi());
    }
  } else {
    ShouldNotReachHere();
  }
}


// Saves 4 given registers in reserved argument area.
void LIR_Assembler::save_in_reserved_area(Register r1, Register r2, Register r3, Register r4) {
  verify_reserved_argument_area_size(4);
  __ stmia(SP, RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3) | RegisterSet(r4));
}

// Restores 4 given registers from reserved argument area.
void LIR_Assembler::restore_from_reserved_area(Register r1, Register r2, Register r3, Register r4) {
  __ ldmia(SP, RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3) | RegisterSet(r4), no_writeback);
}


void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst = op->dst()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = op->tmp()->as_register();
  Register tmp2 = Rtemp;

  assert(src == R0 && src_pos == R1 && dst == R2 && dst_pos == R3, "code assumption");

  CodeStub* stub = op->stub();

  int flags = op->flags();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // If we don't know anything or it's an object array, just go through the generic arraycopy
  if (default_type == NULL) {

    // save arguments, because they will be killed by a runtime call
    save_in_reserved_area(R0, R1, R2, R3);

    // pass length argument on SP[0]
    __ str(length, Address(SP, -2*wordSize, pre_indexed)); // 2 words for a proper stack alignment

    address copyfunc_addr = StubRoutines::generic_arraycopy();
    assert(copyfunc_addr != NULL, "generic arraycopy stub required");
#ifndef PRODUCT
    if (PrintC1Statistics) {
      __ inc_counter((address)&Runtime1::_generic_arraycopystub_cnt, tmp, tmp2);
    }
#endif // !PRODUCT
    // the stub is in the code cache so close enough
    __ call(copyfunc_addr, relocInfo::runtime_call_type);

    __ add(SP, SP, 2*wordSize);

    __ cbz_32(R0, *stub->continuation());
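    // The generic arraycopy stub returns 0 on success or, on a partial
    // copy, the bitwise complement of the number of elements already
    // copied; the code below recovers that count and advances the
    // arguments before taking the slow path.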
2149
2150 __ mvn_32(tmp, R0);
2151 restore_from_reserved_area(R0, R1, R2, R3); // load saved arguments in slow case only
2152 __ sub_32(length, length, tmp);
2153 __ add_32(src_pos, src_pos, tmp);
2154 __ add_32(dst_pos, dst_pos, tmp);
2155
2156 __ b(*stub->entry());
2157
2158 __ bind(*stub->continuation());
2159 return;
2160 }
2161
2162 assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(),
2163 "must be true at this point");
2164 int elem_size = type2aelembytes(basic_type);
2165 int shift = exact_log2(elem_size);
2166
  // ... (elided) ...
      __ load_klass(tmp, src);
    } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(tmp, dst);
    }
    int lh_offset = in_bytes(Klass::layout_helper_offset());

    __ ldr_u32(tmp2, Address(tmp, lh_offset));

    jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
    __ mov_slow(tmp, objArray_lh);
    __ cmp_32(tmp, tmp2);
    __ b(*stub->entry(), ne);
  }

  save_in_reserved_area(R0, R1, R2, R3);

  Register src_ptr = R0;
  Register dst_ptr = R1;
  Register len = R2;
  Register chk_off = R3;
  Register super_k = tmp;

  __ add(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
  __ add_ptr_scaled_int32(src_ptr, src_ptr, src_pos, shift);

  __ add(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
  __ add_ptr_scaled_int32(dst_ptr, dst_ptr, dst_pos, shift);
  __ load_klass(tmp, dst);

  int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());

  __ ldr(super_k, Address(tmp, ek_offset));

  __ mov(len, length);
  __ ldr_u32(chk_off, Address(super_k, sco_offset));
  __ push(super_k);

  __ call(copyfunc_addr, relocInfo::runtime_call_type);

#ifndef PRODUCT
  if (PrintC1Statistics) {
    Label failed;
    __ cbnz_32(R0, failed);
    __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, tmp, tmp2);
    __ bind(failed);
  }
#endif // PRODUCT

  __ add(SP, SP, wordSize); // Drop super_k argument

  __ cbz_32(R0, *stub->continuation());
  __ mvn_32(tmp, R0);

  // load saved arguments in slow case only
  restore_from_reserved_area(R0, R1, R2, R3);

  __ sub_32(length, length, tmp);
  __ add_32(src_pos, src_pos, tmp);
  __ add_32(dst_pos, dst_pos, tmp);

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, tmp, tmp2);
  }
#endif

  __ b(*stub->entry());

  __ bind(cont);
  // ... (elided) ...
  Register len = R2;

  __ add(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
  __ add_ptr_scaled_int32(src_ptr, src_ptr, src_pos, shift);

  __ add(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
  __ add_ptr_scaled_int32(dst_ptr, dst_ptr, dst_pos, shift);

  __ mov(len, length);

  __ call(entry, relocInfo::runtime_call_type);

  __ bind(*stub->continuation());
}
2405
2406 #ifdef ASSERT
2407 // emit run-time assertion
2408 void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
2409 assert(op->code() == lir_assert, "must be");
2410
2411 if (op->in_opr1()->is_valid()) {
2412 assert(op->in_opr2()->is_valid(), "both operands must be valid");
2413 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
2414 } else {
2415 assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
2416 assert(op->condition() == lir_cond_always, "no other conditions allowed");
2417 }
2418
2419 Label ok;
2420 if (op->condition() != lir_cond_always) {
2421 AsmCondition acond = al;
2422 switch (op->condition()) {
2423 case lir_cond_equal: acond = eq; break;
2424 case lir_cond_notEqual: acond = ne; break;
2425 case lir_cond_less: acond = lt; break;
2426 case lir_cond_lessEqual: acond = le; break;
2427 case lir_cond_greaterEqual: acond = ge; break;
2428 case lir_cond_greater: acond = gt; break;
2429 case lir_cond_aboveEqual: acond = hs; break;
2430 case lir_cond_belowEqual: acond = ls; break;
2431 default: ShouldNotReachHere();
2432 }
2433 __ b(ok, acond);
2434 }
2435 if (op->halt()) {
2436 const char* str = __ code_string(op->msg());
2437 __ stop(str);
2438 } else {
2439 breakpoint();
2440 }
2441 __ bind(ok);
2442 }
2443 #endif // ASSERT
2444
void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  fatal("CRC32 intrinsic is not implemented on this platform");
}

void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_pointer_register();
  Register hdr = op->hdr_opr()->as_pointer_register();
  Register lock = op->lock_opr()->as_pointer_register();
  Register tmp = op->scratch_opr()->is_illegal() ? noreg :
                 op->scratch_opr()->as_pointer_register();

  if (!UseFastLocking) {
    __ b(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    int null_check_offset = __ lock_object(hdr, obj, lock, tmp, *op->stub()->entry());
    if (op->info() != NULL) {
      // ... (elided) ...
}


void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
  assert(op->tmp1()->is_register(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_pointer_register();
  assert_different_registers(mdo, tmp1);
  __ mov_metadata(mdo, md->constant_encoding());
  int mdo_offset_bias = 0;
  int max_offset = 4096;
  if (md->byte_offset_of_slot(data, CounterData::count_offset()) + data->size_in_bytes() >= max_offset) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ldr can use an immediate offset to reference the slots of the data
    mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
    __ mov_slow(tmp1, mdo_offset_bias);
    __ add(mdo, mdo, tmp1);
  }

  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, tmp1, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // ... (elided) ...
    __ str(tmp1, counter_addr);
  }
}
2569
2570 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
2571 fatal("Type profiling not implemented on this platform");
2572 }
2573
2574 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
2575 Unimplemented();
2576 }
2577
2578
2579 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
2580 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
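// add_slow can materialize a displacement of any size, unlike a plain add,
// whose immediate must encode as an ARM rotated 8-bit constant.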
2581 __ add_slow(dst->as_pointer_register(), mon_addr.base(), mon_addr.disp());
2582 }
2583
2584
2585 void LIR_Assembler::align_backward_branch_target() {
2586 // Some ARM processors do better with 8-byte branch target alignment
2587 __ align(8);
2588 }
2589
2590
2591 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
2592
2593 if (left->is_single_cpu()) {
2594 assert (dest->type() == T_INT, "unexpected result type");
2595 assert (left->type() == T_INT, "unexpected left type");
2596 __ neg_32(dest->as_register(), left->as_register());
2597 } else if (left->is_double_cpu()) {
2598 Register dest_lo = dest->as_register_lo();
2599 Register dest_hi = dest->as_register_hi();
2600 Register src_lo = left->as_register_lo();
2601 Register src_hi = left->as_register_hi();
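// 64-bit negate: rsbs computes (0 - lo) and sets the carry/borrow flag,
// rsc then computes (0 - hi - borrow). If dest_lo aliases src_hi, the
// low-word write would clobber the high word before rsc reads it, so the
// low result is staged in Rtemp and moved into place afterwards.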
2602 if (dest_lo == src_hi) {
2603 dest_lo = Rtemp;
2604 }
2605 __ rsbs(dest_lo, src_lo, 0);
2606 __ rsc(dest_hi, src_hi, 0);
2607 move_regs(dest_lo, dest->as_register_lo());
2608 } else if (left->is_single_fpu()) {
2609 __ neg_float(dest->as_float_reg(), left->as_float_reg());
2610 } else if (left->is_double_fpu()) {
2611 __ neg_double(dest->as_double_reg(), left->as_double_reg());
2612 } else {
2613 ShouldNotReachHere();
2614 }
2615 }
2616
2617
2618 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
2619 assert(patch_code == lir_patch_none, "Patch code not supported");
2620 LIR_Address* addr = addr_opr->as_address_ptr();
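// Two encodable forms: base + small immediate displacement, or
// base + (index << scale) with a zero displacement.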
2621 if (addr->index()->is_illegal()) {
2622 jint c = addr->disp();
2623 if (!Assembler::is_arith_imm_in_range(c)) {
2624 BAILOUT("illegal arithmetic operand");
2625 }
2626 __ add(dest->as_pointer_register(), addr->base()->as_pointer_register(), c);
2627 } else {
2628 assert(addr->disp() == 0, "cannot handle otherwise");
2629 __ add(dest->as_pointer_register(), addr->base()->as_pointer_register(),
2630 AsmOperand(addr->index()->as_pointer_register(), lsl, addr->scale()));
2631 }
2632 }
2633
2634
2635 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
2636 assert(!tmp->is_valid(), "don't need temporary");
2637 __ call(dest);
2638 if (info != NULL) {
2639 add_call_info_here(info);
2640 }
2641 }
2642
2643
2644 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
2645 assert((src->is_double_cpu() && dest->is_address()) ||
2646 (src->is_address() && dest->is_double_cpu()),
2647 "Simple move_op is called for all other cases");
2648
2649 int null_check_offset;
2650 if (dest->is_address()) {
2651 // Store
2652 const LIR_Address* addr = dest->as_address_ptr();
2653 const Register src_lo = src->as_register_lo();
2654 const Register src_hi = src->as_register_hi();
2655 assert(addr->index()->is_illegal() && addr->disp() == 0, "The address is simple already");
2656
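// stmia stores the register set in ascending register-number order, so the
// pair can be stored directly only when src_lo's encoding is below src_hi's;
// otherwise the high word is staged in Rtemp (the highest register) first.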
2657 if (src_lo < src_hi) {
2658 null_check_offset = __ offset();
2659 __ stmia(addr->base()->as_register(), RegisterSet(src_lo) | RegisterSet(src_hi));
2660 } else {
2661 assert(src_lo < Rtemp, "Rtemp is higher than any allocatable register");
2662 __ mov(Rtemp, src_hi);
2663 null_check_offset = __ offset();
2664 __ stmia(addr->base()->as_register(), RegisterSet(src_lo) | RegisterSet(Rtemp));
2665 }
2666 } else {
2667 // Load
2668 const LIR_Address* addr = src->as_address_ptr();
2669 const Register dest_lo = dest->as_register_lo();
2670 const Register dest_hi = dest->as_register_hi();
2671 assert(addr->index()->is_illegal() && addr->disp() == 0, "The address is simple already");
2672
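// Same ordering constraint for ldmia: if dest_lo's encoding is above
// dest_hi's, load the high word into Rtemp and move it down afterwards.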
2673 null_check_offset = __ offset();
2674 if (dest_lo < dest_hi) {
2675 __ ldmia(addr->base()->as_register(), RegisterSet(dest_lo) | RegisterSet(dest_hi));
2676 } else {
2677 assert(dest_lo < Rtemp, "Rtemp is higher than any allocatable register");
2678 __ ldmia(addr->base()->as_register(), RegisterSet(dest_lo) | RegisterSet(Rtemp));
2679 __ mov(dest_hi, Rtemp);
2680 }
2681 }
2682
2683 if (info != NULL) {
2684 add_debug_info_for_null_check(null_check_offset, info);
2685 }
2686 }
2687
2688
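// All of these lower to a hardware barrier via MacroAssembler::membar();
// the scratch register is there for pre-ARMv7 cores, where the barrier is
// a CP15 operation that needs a register operand (ARMv7+ simply emits dmb).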
2689 void LIR_Assembler::membar() {
2690 __ membar(MacroAssembler::StoreLoad, Rtemp);
2691 }
2692
2693 void LIR_Assembler::membar_acquire() {
2694 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
2695 }
2696
2697 void LIR_Assembler::membar_release() {
2698 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
2699 }
2700
2701 void LIR_Assembler::membar_loadload() {
2702 __ membar(MacroAssembler::LoadLoad, Rtemp);
2703 }
2704
2705 void LIR_Assembler::membar_storestore() {
2706 __ membar(MacroAssembler::StoreStore, Rtemp);
2707 }
2708
2709 void LIR_Assembler::membar_loadstore() {
2710 __ membar(MacroAssembler::LoadStore, Rtemp);
2711 }
2712
2713 void LIR_Assembler::membar_storeload() {
2714 __ membar(MacroAssembler::StoreLoad, Rtemp);
2715 }
2716
2717 void LIR_Assembler::on_spin_wait() {
2718 Unimplemented();
2719 }
2720
2721 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
2722 // Not used on ARM
2723 Unimplemented();
2724 }
2725
2726 void LIR_Assembler::peephole(LIR_List* lir) {
2727 LIR_OpList* inst = lir->instructions_list();
2728 const int inst_length = inst->length();
2729 for (int i = 0; i < inst_length; i++) {
2730 LIR_Op* op = inst->at(i);
2731 switch (op->code()) {
2732 case lir_cmp: {
2733 // Replace:
2734 // cmp rX, y
2735 // cmove [EQ] y, z, rX
2736 // with
2737 // cmp rX, y
2738 // cmove [EQ] illegalOpr, z, rX
2739 //
2740 // or
2741 // cmp rX, y
2742 // cmove [NE] z, y, rX
2743 // with
2744 // cmp rX, y
2745 // cmove [NE] z, illegalOpr, rX
2746 //
2770 if (cmp_res != LIR_OprFact::illegalOpr) {
2771 LIR_Condition cond = cmove->condition();
2772 if (cond == lir_cond_equal && cmove->in_opr1() == cmp_arg) {
2773 cmove->set_in_opr1(LIR_OprFact::illegalOpr);
2774 } else if (cond == lir_cond_notEqual && cmove->in_opr2() == cmp_arg) {
2775 cmove->set_in_opr2(LIR_OprFact::illegalOpr);
2776 }
2777 }
2778 }
2779 }
2780 break;
2781 }
2782
2783 default:
2784 break;
2785 }
2786 }
2787 }
2788
2789 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
2790 assert(src->is_address(), "sanity");
2791 Address addr = as_Address(src->as_address_ptr());
2792
2793 if (code != lir_xchg) {
2794 assert(!data->is_oop(), "xadd for oops");
2795 }
2797
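// Release-style barrier (StoreStore | LoadStore) before the exclusive
// sequence; together with the trailing barrier below, the whole atomic
// operation behaves like a full fence.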
2798 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
2799
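// LL/SC retry loop: ldrex marks the address for exclusive access, strex
// writes 0 to Rtemp on success and 1 if another observer touched the
// location in between, in which case the loop retries.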
2800 Label retry;
2801 __ bind(retry);
2802
2803 if (data->type() == T_INT || data->is_oop()) {
2804 Register dst = dest->as_register();
2805 Register new_val = noreg;
2806 __ ldrex(dst, addr);
2807 if (code == lir_xadd) {
2808 Register tmp_reg = tmp->as_register();
2809 if (data->is_constant()) {
2810 assert_different_registers(dst, tmp_reg);
2811 __ add_32(tmp_reg, dst, data->as_constant_ptr()->as_jint());
2812 } else {
2813 assert_different_registers(dst, tmp_reg, data->as_register());
2814 __ add_32(tmp_reg, dst, data->as_register());
2815 }
2816 new_val = tmp_reg;
2817 } else {
2818 if (UseCompressedOops && data->is_oop()) {
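// UseCompressedOops is always false in the 32-bit VM, so this branch is
// effectively dead here; tmp is reserved for the case where a compressed
// new value would need to be encoded before the store.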
2819 new_val = tmp->as_pointer_register();
2820 } else {
2821 new_val = data->as_register();
2822 }
2823 assert_different_registers(dst, new_val);
2824 }
2825 __ strex(Rtemp, new_val, addr);
2826
2827 } else if (data->type() == T_LONG) {
2828 Register dst_lo = dest->as_register_lo();
2829 Register new_val_lo = noreg;
2830 Register dst_hi = dest->as_register_hi();
2831
2832 assert(dst_hi->encoding() == dst_lo->encoding() + 1, "non aligned register pair");
2833 assert((dst_lo->encoding() & 0x1) == 0, "misaligned register pair");
2834
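// ldrexd/strexd require an even-numbered register paired with its
// successor (e.g. R0:R1), which the register-pair asserts here enforce.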
2836 __ ldrexd(dst_lo, addr);
2837 if (code == lir_xadd) {
2838 Register tmp_lo = tmp->as_register_lo();
2839 Register tmp_hi = tmp->as_register_hi();
2840
2841 assert(tmp_hi->encoding() == tmp_lo->encoding() + 1, "non aligned register pair");
2842 assert((tmp_lo->encoding() & 0x1) == 0, "misaligned register pair");
2843
2844 if (data->is_constant()) {
2845 jlong c = data->as_constant_ptr()->as_jlong();
2846 assert((jlong)((jint)c) == c, "overflow");
2847 assert_different_registers(dst_lo, dst_hi, tmp_lo, tmp_hi);
2848 __ adds(tmp_lo, dst_lo, (jint)c);
2849 __ adc(tmp_hi, dst_hi, 0);
2850 } else {
2851 Register new_val_lo = data->as_register_lo();
2852 Register new_val_hi = data->as_register_hi();
2853 assert_different_registers(dst_lo, dst_hi, tmp_lo, tmp_hi, new_val_lo, new_val_hi);
2854 __ adds(tmp_lo, dst_lo, new_val_lo);
2855 __ adc(tmp_hi, dst_hi, new_val_hi);
2856 }
2857 new_val_lo = tmp_lo;
2858 } else {
2859 new_val_lo = data->as_register_lo();
2860 Register new_val_hi = data->as_register_hi();
2861
2862 assert_different_registers(dst_lo, dst_hi, new_val_lo, new_val_hi);
2863 assert(new_val_hi->encoding() == new_val_lo->encoding() + 1, "non aligned register pair");
2864 assert((new_val_lo->encoding() & 0x1) == 0, "misaligned register pair");
2865 }
2866 __ strexd(Rtemp, new_val_lo, addr);
2867 } else {
2868 ShouldNotReachHere();
2869 }
2870
2871 __ cbnz_32(Rtemp, retry);
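// Trailing barrier: orders the successful store-exclusive against
// subsequent accesses, completing the full-fence semantics of the atomic.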
2872 __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp);
2873
2874 }
2875
2876 #undef __