/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
# include "vmreg_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
# include "vmreg_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
# include "vmreg_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
# include "vmreg_ppc.inline.hpp"
#endif


void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that a call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}
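// Illustrative note (sizes are platform-defined): the nop padding in
// patching_epilog() guarantees the patch site spans at least
// NativeCall::instruction_size bytes -- e.g. the 5 bytes of an E8 rel32
// call on x86 -- so the runtime can later overwrite the site with a call
// into the patching code without clobbering a neighboring instruction.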

PatchingStub::PatchID
LIR_Assembler::patching_id(CodeEmitInfo* info) {
  IRScope* scope = info->scope();
  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
  if (Bytecodes::has_optional_appendix(bc_raw)) {
    return PatchingStub::load_appendix_id;
  }
  return PatchingStub::load_mirror_id;
}

//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _bs(Universe::heap()->barrier_set())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_begin_offset(0)
 , _pending_non_safepoint_end_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
}


void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = (*stub_list)[m];

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob which
// in turn simplifies stack overflow handling.
int LIR_Assembler::bang_size_in_bytes() const {
  return MAX2(initial_frame_size_in_bytes(), _compilation->interpreter_frame_size());
}
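// Illustrative example (numbers are made up): if the compiled frame needs
// 96 bytes but the interpreter would need 160 bytes for the same activation
// after a deoptimization, the prologue bangs 160 bytes; only the MAX2
// relationship above matters.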

void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}
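// Illustrative note: align_backward_branch_target() above is the backend's
// hook for padding loop headers (on x86, for example, to a word boundary)
// so that the target of a backward branch starts at an alignment the
// instruction fetcher handles well; the exact policy is platform-defined.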

void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print out every op since that's too verbose. Print
      // branches since they include block and stub names. Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  _masm->code_section()->relocate(pc(), relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}

void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL) return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_end_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL) return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info. Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_end_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_end_offset < pc_offset) {
      record_non_safepoint_debug_info();
    } else if (_pending_non_safepoint_begin_offset < pc_offset) {
      _pending_non_safepoint_end_offset = pc_offset - 1;
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_begin_offset = pc_offset;
    _pending_non_safepoint_end_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Return NULL if n is too large.
// Returns the caller_bci for the next-younger state, also.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL) break;
    t = t->caller_state();
  }
  if (t == NULL) return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL) return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}
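// Illustrative trace (hypothetical inline chain): with a() -> b() -> c()
// inlined and s_c the youngest ValueStack, nth_oldest(s_c, 0, bci) yields
// a's state, nth_oldest(s_c, 2, bci) yields c's own state, and
// nth_oldest(s_c, 3, bci) returns NULL because only three states exist.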

void LIR_Assembler::record_non_safepoint_debug_info() {
  int pc_offset = _pending_non_safepoint_end_offset;
  ValueStack* vstack = debug_info(_pending_non_safepoint);
  int bci = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL) break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    debug_info->describe_scope(pc_offset, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}

void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::add_non_safepoint_debug_info_here(CodeEmitInfo* info) {
  flush_debug_info(code_offset());
  info->record_non_safepoint_debug_info(compilation()->debug_info_recorder(), code_offset());
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}


void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align call sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub stuff out of line
  emit_static_call_stub();

  switch (op->code()) {
  case lir_static_call:
  case lir_dynamic_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default:
    fatal(err_msg_res("unexpected op code: %s", op->name()));
    break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it up
  if (UseSSE < 2) {
    int i;
    for ( i = 1; i <= 7 ; i++ ) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}
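// Illustrative note on align_call (details are platform-defined): on x86,
// for example, the backend pads with nops so the 4-byte displacement of the
// call does not straddle a machine word; that lets the runtime re-bind the
// call target with one aligned store while other threads may be executing
// the caller.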

void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind (*(op->label()));
}


void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}
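// Illustrative note on lir_null_check above: _masm->null_check() touches the
// object so that a NULL receiver faults, and the CodeEmitInfo recorded via
// add_debug_info_for_null_check_here() lets the VM map the faulting pc back
// to this site (through the ImplicitNullCheckStub) and raise a
// NullPointerException instead of crashing.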

void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
    case lir_exp:
    case lir_pow:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}
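// Illustrative note on fpu_pop_count above: on 32-bit x86 without SSE the
// float/double arith ops run on the x87 register stack, and a pop count of 1
// asks the backend for the popping form of the instruction (e.g. faddp
// rather than fadd) so the stack stays balanced; backends without an FPU
// register stack generally ignore the flag.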

void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack (src, dest, src->type(), pop_fpu_stack);
}


void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}