173
// Emit the code for all slow-case stubs collected in _slow_case_stubs
// while the main LIR was being assembled.
void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}
177
178
179 bool LIR_Assembler::needs_icache(ciMethod* method) const {
180 return !method->is_static();
181 }
182
183
// Current emission offset of the underlying macro assembler.
int LIR_Assembler::code_offset() const {
  return _masm->offset();
}
187
188
// Current emission address (pc) of the underlying macro assembler.
address LIR_Assembler::pc() const {
  return _masm->pc();
}
192
193
194 void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
195 for (int i = 0; i < info_list->length(); i++) {
196 XHandlers* handlers = info_list->at(i)->exception_handlers();
197
198 for (int j = 0; j < handlers->length(); j++) {
199 XHandler* handler = handlers->handler_at(j);
200 assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
201 assert(handler->entry_code() == NULL ||
202 handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
203 handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");
204
205 if (handler->entry_pco() == -1) {
206 // entry code not emitted yet
207 if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
208 handler->set_entry_pco(code_offset());
209 if (CommentedAssembly) {
210 _masm->block_comment("Exception adapter block");
211 }
212 emit_lir_list(handler->entry_code());
780 op->result_opr());
781 break;
782
783 case lir_throw:
784 throw_op(op->in_opr1(), op->in_opr2(), op->info());
785 break;
786
787 case lir_xadd:
788 case lir_xchg:
789 atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
790 break;
791
792 default:
793 Unimplemented();
794 break;
795 }
796 }
797
798
// Delegate frame construction to the macro assembler, using this
// method's initial frame size.
void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes());
}
802
803
804 void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
805 assert((src->is_single_fpu() && dest->is_single_stack()) ||
806 (src->is_double_fpu() && dest->is_double_stack()),
807 "round_fp: rounds register -> stack location");
808
809 reg2stack (src, dest, src->type(), pop_fpu_stack);
810 }
811
812
813 void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
814 if (src->is_register()) {
815 if (dest->is_register()) {
816 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
817 reg2reg(src, dest);
818 } else if (dest->is_stack()) {
819 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
820 reg2stack(src, dest, type, pop_fpu_stack);
|
173
// Emit the code for all slow-case stubs collected in _slow_case_stubs
// while the main LIR was being assembled.
void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}
177
178
179 bool LIR_Assembler::needs_icache(ciMethod* method) const {
180 return !method->is_static();
181 }
182
183
// Current emission offset of the underlying macro assembler.
int LIR_Assembler::code_offset() const {
  return _masm->offset();
}
187
188
// Current emission address (pc) of the underlying macro assembler.
address LIR_Assembler::pc() const {
  return _masm->pc();
}
192
193 // To bang the stack of this compiled method we use the stack size
194 // that the interpreter would need in case of a deoptimization. This
195 // removes the need to bang the stack in the deoptimization blob which
196 // in turn simplifies stack overflow handling.
197 int LIR_Assembler::bang_size_in_bytes() const {
198 return MAX2(initial_frame_size_in_bytes(), _compilation->interpreter_frame_size());
199 }
200
201 void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
202 for (int i = 0; i < info_list->length(); i++) {
203 XHandlers* handlers = info_list->at(i)->exception_handlers();
204
205 for (int j = 0; j < handlers->length(); j++) {
206 XHandler* handler = handlers->handler_at(j);
207 assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
208 assert(handler->entry_code() == NULL ||
209 handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
210 handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");
211
212 if (handler->entry_pco() == -1) {
213 // entry code not emitted yet
214 if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
215 handler->set_entry_pco(code_offset());
216 if (CommentedAssembly) {
217 _masm->block_comment("Exception adapter block");
218 }
219 emit_lir_list(handler->entry_code());
787 op->result_opr());
788 break;
789
790 case lir_throw:
791 throw_op(op->in_opr1(), op->in_opr2(), op->info());
792 break;
793
794 case lir_xadd:
795 case lir_xchg:
796 atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
797 break;
798
799 default:
800 Unimplemented();
801 break;
802 }
803 }
804
805
// Delegate frame construction to the macro assembler. Besides the
// frame size, also pass the bang size so the prologue can bang the
// stack far enough to cover a possible later deoptimization
// (see bang_size_in_bytes()).
void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
}
809
810
811 void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
812 assert((src->is_single_fpu() && dest->is_single_stack()) ||
813 (src->is_double_fpu() && dest->is_double_stack()),
814 "round_fp: rounds register -> stack location");
815
816 reg2stack (src, dest, src->type(), pop_fpu_stack);
817 }
818
819
820 void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
821 if (src->is_register()) {
822 if (dest->is_register()) {
823 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
824 reg2reg(src, dest);
825 } else if (dest->is_stack()) {
826 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
827 reg2stack(src, dest, type, pop_fpu_stack);
|