/*
 * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#include "runtime/os.hpp"

void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // We must have enough patching space so that a call can be inserted.
  // We cannot use fat nops here, since the concurrent code rewrite may
  // transiently create an illegal instruction sequence.
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeGeneralJump::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}

PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
  IRScope* scope = info->scope();
  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
  if (Bytecodes::has_optional_appendix(bc_raw)) {
    return PatchingStub::load_appendix_id;
  }
  return PatchingStub::load_mirror_id;
}

//---------------------------------------------------------------

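// The LIR_Assembler drives C1's final code-emission phase: it walks the LIR
// of each basic block, dispatches every LIR_Op to the matching
// platform-specific emitter through the C1 MacroAssembler, and collects the
// slow-case stubs that are emitted out of line after the main code.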
LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _bs(Universe::heap()->barrier_set())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
}


void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = stub_list->at(m);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob which
// in turn simplifies stack overflow handling.
int LIR_Assembler::bang_size_in_bytes() const {
  return MAX2(initial_frame_size_in_bytes() + os::extra_bang_size_in_bytes(), _compilation->interpreter_frame_size());
}

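// Assign an entry PC offset to every exception handler that does not yet
// have one. Handlers with non-trivial entry (adapter) code get that code
// emitted here, out of line; trivial handlers simply reuse the PC offset
// recorded for their entry block.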
void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}

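// Emit every operation of a LIR list in order. A peephole pass runs over
// the list first; after each op is emitted, debug information for
// non-safepoint locations is gathered if the recorder requests it.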
void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print out every op since that's too verbose. Print
      // branches since they include block and stub names. Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}

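// Called for each emitted op while non-safepoint debug info is being
// recorded. Consecutive ops that share the same source instruction or the
// same debug state are coalesced into a single pending entry; the pending
// entry is flushed via record_non_safepoint_debug_info() once an op with a
// different state shows up at a later PC offset.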
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL) return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL) return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info. Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Returns NULL if n is too large.
// Also returns, through bci_result, the caller_bci for the next-younger state.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL) break;
    t = t->caller_state();
  }
  if (t == NULL) return NULL;

  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL) return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}

void LIR_Assembler::record_non_safepoint_debug_info() {
  int pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack = debug_info(_pending_non_safepoint);
  int bci = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL) break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute, since these ScopeDescs are never used for deopt.
    methodHandle null_mh;
    debug_info->describe_scope(pc_offset, null_mh, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  return add_debug_info_for_null_check(code_offset(), cinfo);
}

ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  append_code_stub(stub);
  return stub;
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}

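// Emit a Java call. The switch below picks the call flavor: static and
// dynamic calls use static-call relocation, optimized virtual calls use
// opt_virtual relocation, inline-cache calls go through ic_call(), and
// true virtual calls dispatch through the vtable.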
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align call sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub out of line
  emit_static_call_stub();
  CHECK_BAILOUT();

  switch (op->code()) {
  case lir_static_call:
  case lir_dynamic_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default:
    fatal("unexpected op code: %s", op->name());
    break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it up here.
  if (UseSSE < 2) {
    int i;
    for ( i = 1; i <= 7 ; i++ ) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind (*(op->label()));
}


void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check: {
      ImplicitNullCheckStub* stub = add_debug_info_for_null_check_here(op->info());

      if (op->in_opr()->is_single_cpu()) {
        _masm->null_check(op->in_opr()->as_register(), stub->entry());
      } else {
        Unimplemented();
      }
      break;
    }

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}

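// Ops that take no input operand: alignment and frame construction, the
// standard and OSR entry points, FPU bookkeeping, memory barriers, and
// thread-local access.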
void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      _masm->align(BytesPerWord);
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    case lir_on_spin_wait:
      on_spin_wait();
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_tan:
    case lir_log10:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack (src, dest, src->type(), pop_fpu_stack);
}

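// Dispatch a move to the platform-specific primitive (reg2reg, reg2stack,
// reg2mem, stack2reg, stack2stack, const2reg, const2stack, const2mem,
// mem2reg) based on the kinds of the source and destination operands.
// Only the memory and constant cases may carry a patch code or
// CodeEmitInfo, as the asserts below enforce.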
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), os::strdup(st.as_string(), mtCompiler), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}