472 ffree(0);
473 }
474 }
475 #endif // X86 && TIERED
476 }
477
478
// Binds the label carried by a LIR_OpLabel to the current code-emission
// position, making it a valid target for branches that reference it.
479 void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
480 _masm->bind (*(op->label()));
481 }
482
483
// Emits machine code for a one-operand LIR instruction by dispatching on
// its opcode.  NOTE(review): this excerpt elides original lines 513-740
// between the 'lir_return' case and the trailing 'Unimplemented()', so
// only the cases visible here are documented.
484 void LIR_Assembler::emit_op1(LIR_Op1* op) {
485 switch (op->code()) {
486 case lir_move:
// Volatile moves take a dedicated path and must never be patched.
487 if (op->move_kind() == lir_move_volatile) {
488 assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
489 volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
490 } else {
// Generic move: forward patch/info/FPU-pop state and whether the
// move was flagged as unaligned.
491 move_op(op->in_opr(), op->result_opr(), op->type(),
492 op->patch_code(), op->info(), op->pop_fpu_stack(), op->move_kind() == lir_move_unaligned);
493 }
494 break;
495
496 case lir_prefetchr:
// Read-prefetch hint for the address in the input operand.
497 prefetchr(op->in_opr());
498 break;
499
500 case lir_prefetchw:
// Write-prefetch hint for the address in the input operand.
501 prefetchw(op->in_opr());
502 break;
503
504 case lir_roundfp: {
505 LIR_OpRoundFP* round_op = op->as_OpRoundFP();
506 roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
507 break;
508 }
509
510 case lir_return:
511 return_op(op->in_opr());
512 break;
// NOTE(review): original lines 513-740 are not shown; the
// 'Unimplemented()' below presumably sits under a default/unhandled
// case label in the elided region -- confirm against the full file.
741 Unimplemented();
742 break;
743 }
744 }
745
746
// Emits the method-entry frame setup, sized by the frame layout computed
// in initial_frame_size_in_bytes().
747 void LIR_Assembler::build_frame() {
748 _masm->build_frame(initial_frame_size_in_bytes());
749 }
750
751
// Rounds an FPU register value by storing it to a stack slot of matching
// width (per the assert message, the register->stack store performs the
// rounding).  'tmp' is unused in this visible implementation.
752 void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
753 assert((src->is_single_fpu() && dest->is_single_stack()) ||
754 (src->is_double_fpu() && dest->is_double_stack()),
755 "round_fp: rounds register -> stack location");
756
757 reg2stack (src, dest, src->type(), pop_fpu_stack);
758 }
759
760
// Generic move dispatcher: routes src -> dest to the specialized emitter
// selected by the (source kind, destination kind) pair.  patch_code/info
// are asserted to be absent on combinations that cannot be patched;
// pop_fpu_stack and unaligned are forwarded only where the target
// emitter takes them.
761 void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned) {
762 if (src->is_register()) {
763 if (dest->is_register()) {
764 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
765 reg2reg(src, dest);
766 } else if (dest->is_stack()) {
767 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
768 reg2stack(src, dest, type, pop_fpu_stack);
769 } else if (dest->is_address()) {
// Register -> memory is the only register-source case that may patch.
770 reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, unaligned);
771 } else {
772 ShouldNotReachHere();
773 }
774
775 } else if (src->is_stack()) {
776 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
777 if (dest->is_register()) {
778 stack2reg(src, dest, type);
779 } else if (dest->is_stack()) {
780 stack2stack(src, dest, type);
781 } else {
782 ShouldNotReachHere();
783 }
784
785 } else if (src->is_constant()) {
786 if (dest->is_register()) {
787 const2reg(src, dest, patch_code, info); // patching is possible
788 } else if (dest->is_stack()) {
789 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
790 const2stack(src, dest);
791 } else if (dest->is_address()) {
792 assert(patch_code == lir_patch_none, "no patching allowed here");
793 const2mem(src, dest, type, info);
794 } else {
795 ShouldNotReachHere();
796 }
797
798 } else if (src->is_address()) {
// Memory source: only memory -> register is supported here.
799 mem2reg(src, dest, type, patch_code, info, unaligned);
800
801 } else {
802 ShouldNotReachHere();
803 }
804 }
805
806
807 void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
808 #ifndef PRODUCT
809 if (VerifyOopMaps || VerifyOops) {
810 bool v = VerifyOops;
811 VerifyOops = true;
812 OopMapStream s(info->oop_map());
813 while (!s.is_done()) {
814 OopMapValue v = s.current();
815 if (v.is_oop()) {
816 VMReg r = v.reg();
817 if (!r->is_stack()) {
818 stringStream st;
819 st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
// ---- excerpt boundary: the lines below repeat the same region from a later revision that adds a 'wide' move flag ----
472 ffree(0);
473 }
474 }
475 #endif // X86 && TIERED
476 }
477
478
// Binds the label carried by a LIR_OpLabel to the current code-emission
// position, making it a valid target for branches that reference it.
479 void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
480 _masm->bind (*(op->label()));
481 }
482
483
// Emits machine code for a one-operand LIR instruction by dispatching on
// its opcode.  This version derives a 'wide' flag from the move kind and
// passes it to move_op.  NOTE(review): this excerpt elides original
// lines 515-742 between 'lir_return' and the trailing 'Unimplemented()',
// so only the cases visible here are documented.
484 void LIR_Assembler::emit_op1(LIR_Op1* op) {
485 switch (op->code()) {
486 case lir_move:
// Volatile moves take a dedicated path and must never be patched.
487 if (op->move_kind() == lir_move_volatile) {
488 assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
489 volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
490 } else {
// Generic move: forward patch/info/FPU-pop state plus flags for
// unaligned and wide moves, both derived from the move kind.
491 move_op(op->in_opr(), op->result_opr(), op->type(),
492 op->patch_code(), op->info(), op->pop_fpu_stack(),
493 op->move_kind() == lir_move_unaligned,
494 op->move_kind() == lir_move_wide);
495 }
496 break;
497
498 case lir_prefetchr:
// Read-prefetch hint for the address in the input operand.
499 prefetchr(op->in_opr());
500 break;
501
502 case lir_prefetchw:
// Write-prefetch hint for the address in the input operand.
503 prefetchw(op->in_opr());
504 break;
505
506 case lir_roundfp: {
507 LIR_OpRoundFP* round_op = op->as_OpRoundFP();
508 roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
509 break;
510 }
511
512 case lir_return:
513 return_op(op->in_opr());
514 break;
// NOTE(review): original lines 515-742 are not shown; the
// 'Unimplemented()' below presumably sits under a default/unhandled
// case label in the elided region -- confirm against the full file.
743 Unimplemented();
744 break;
745 }
746 }
747
748
// Emits the method-entry frame setup, sized by the frame layout computed
// in initial_frame_size_in_bytes().
749 void LIR_Assembler::build_frame() {
750 _masm->build_frame(initial_frame_size_in_bytes());
751 }
752
753
// Rounds an FPU register value by storing it to a stack slot of matching
// width (per the assert message, the register->stack store performs the
// rounding).  'tmp' is unused in this visible implementation.
754 void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
755 assert((src->is_single_fpu() && dest->is_single_stack()) ||
756 (src->is_double_fpu() && dest->is_double_stack()),
757 "round_fp: rounds register -> stack location");
758
759 reg2stack (src, dest, src->type(), pop_fpu_stack);
760 }
761
762
// Generic move dispatcher: routes src -> dest to the specialized emitter
// selected by the (source kind, destination kind) pair.  patch_code/info
// are asserted to be absent on combinations that cannot be patched.
// 'wide' is forwarded only to the memory-touching emitters (reg2mem,
// const2mem, mem2reg); pop_fpu_stack and unaligned likewise go only
// where the target emitter takes them.
763 void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
764 if (src->is_register()) {
765 if (dest->is_register()) {
766 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
767 reg2reg(src, dest);
768 } else if (dest->is_stack()) {
769 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
770 reg2stack(src, dest, type, pop_fpu_stack);
771 } else if (dest->is_address()) {
// Register -> memory is the only register-source case that may patch.
772 reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
773 } else {
774 ShouldNotReachHere();
775 }
776
777 } else if (src->is_stack()) {
778 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
779 if (dest->is_register()) {
780 stack2reg(src, dest, type);
781 } else if (dest->is_stack()) {
782 stack2stack(src, dest, type);
783 } else {
784 ShouldNotReachHere();
785 }
786
787 } else if (src->is_constant()) {
788 if (dest->is_register()) {
789 const2reg(src, dest, patch_code, info); // patching is possible
790 } else if (dest->is_stack()) {
791 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
792 const2stack(src, dest);
793 } else if (dest->is_address()) {
794 assert(patch_code == lir_patch_none, "no patching allowed here");
795 const2mem(src, dest, type, info, wide);
796 } else {
797 ShouldNotReachHere();
798 }
799
800 } else if (src->is_address()) {
// Memory source: only memory -> register is supported here.
801 mem2reg(src, dest, type, patch_code, info, wide, unaligned);
802
803 } else {
804 ShouldNotReachHere();
805 }
806 }
807
808
809 void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
810 #ifndef PRODUCT
811 if (VerifyOopMaps || VerifyOops) {
812 bool v = VerifyOops;
813 VerifyOops = true;
814 OopMapStream s(info->oop_map());
815 while (!s.is_done()) {
816 OopMapValue v = s.current();
817 if (v.is_oop()) {
818 VMReg r = v.reg();
819 if (!r->is_stack()) {
820 stringStream st;
821 st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
// ---- excerpt truncated here: verify_oop_map continues beyond this view ----