/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
# include "vmreg_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
# include "vmreg_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
# include "vmreg_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
# include "vmreg_ppc.inline.hpp"
#endif


void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that a call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_patching_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}
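// A note on the nop padding above: the patch site must end up at least
// NativeCall::instruction_size bytes long because, as the comment in
// patching_epilog says, the code here is later overwritten with a call
// once the referenced field/klass/mirror has been resolved.  The exact
// patching protocol is target-specific (see PatchingStub and the
// nativeInst_<arch>.hpp headers included above); this loop only
// provides the platform-independent size guarantee.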
PatchingStub::PatchID
LIR_Assembler::patching_id(CodeEmitInfo* info) {
  IRScope* scope = info->scope();
  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
  if (Bytecodes::has_optional_appendix(bc_raw)) {
    return PatchingStub::load_appendix_id;
  }
  return PatchingStub::load_mirror_id;
}

//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _bs(Universe::heap()->barrier_set())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
}


void LIR_Assembler::append_patching_stub(PatchingStub* stub) {
  _slow_case_stubs->append(stub);
}


void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::emit_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = (*stub_list)[m];

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob which
// in turn simplifies stack overflow handling.
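// For example (hypothetical sizes): if the compiled frame needs 128
// bytes but deoptimization would replace it with a 512-byte
// interpreter frame, we bang 512 bytes up front -- the MAX2 below --
// so the deoptimization blob can assume that space has already been
// checked.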
int LIR_Assembler::bang_size_in_bytes() const {
  return MAX2(initial_frame_size_in_bytes(), _compilation->interpreter_frame_size());
}

void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}
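// emit_lir_list below is the per-op driver: a peephole pass runs over
// the list first, then each LIR_Op emits itself via the virtual
// op->emit_code(this) dispatch, with codespace and bailout checks
// between ops so that a full CodeBuffer aborts the compilation
// cleanly instead of overflowing.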
void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print out every op since that's too verbose.  Print
      // branches since they include block and stub names.  Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  _masm->code_section()->relocate(pc(), relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}

void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL)  return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL)  return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info.  Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Return NULL if n is too large.
// Returns the caller_bci for the next-younger state, also.
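// For example (hypothetical inline chain): if a() calls b() at bci 5
// and b() calls c() at bci 7, and s is the youngest state (in c),
// then nth_oldest(s, 0, x) returns a's state, nth_oldest(s, 1, x)
// returns b's state and sets x to 5 (the bci in a at which b is
// called), and nth_oldest(s, 3, x) returns NULL because the chain is
// only three states deep.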
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL)  break;
    t = t->caller_state();
  }
  if (t == NULL)  return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL)  return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}

void LIR_Assembler::record_non_safepoint_debug_info() {
  int         pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack    = debug_info(_pending_non_safepoint);
  int         bci       = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL)  break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    debug_info->describe_scope(pc_offset, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}

void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  emit_code_stub(stub);
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  emit_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}
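// emit_call below dispatches on the call kind.  Note that on MP
// systems the call site is aligned first: the target of a resolved
// call may be changed while other threads are executing this nmethod,
// so the patched word must not straddle a boundary that would make
// the update non-atomic (the exact constraint is CPU-specific; see
// align_call in the platform LIR_Assembler).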
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align call sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub stuff out of line
  emit_static_call_stub();

  switch (op->code()) {
  case lir_static_call:
  case lir_dynamic_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default:
    fatal(err_msg_res("unexpected op code: %s", op->name()));
    break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it.
  if (UseSSE < 2) {
    for (int i = 1; i <= 7; i++) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind(*(op->label()));
}


void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}
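// emit_op0 below handles the operand-less ops.  The most involved
// case is lir_std_entry, which lays out the standard method prologue:
// the OSR entry offset is initialized first (and overwritten by a
// later lir_osr_entry if the method actually has an OSR entry), the
// entry is aligned to CodeEntryAlignment, an inline-cache check is
// emitted for non-static methods (see needs_icache above), and only
// then do the verified entry point and frame setup follow.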
void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
    case lir_exp:
    case lir_pow:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
}
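// Note that the bang size handed to the macro assembler above may
// exceed the frame size: bang_size_in_bytes() takes the maximum of
// the compiled frame size and the interpreter frame size needed on
// deoptimization (see the comment at bang_size_in_bytes).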
void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack(src, dest, src->type(), pop_fpu_stack);
}


void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}