1 /* 2 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "c1/c1_Compilation.hpp" 27 #include "c1/c1_Instruction.hpp" 28 #include "c1/c1_InstructionPrinter.hpp" 29 #include "c1/c1_LIRAssembler.hpp" 30 #include "c1/c1_MacroAssembler.hpp" 31 #include "c1/c1_ValueStack.hpp" 32 #include "ci/ciInstance.hpp" 33 #include "gc/shared/barrierSet.hpp" 34 #include "runtime/os.hpp" 35 36 void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) { 37 // We must have enough patching space so that call can be inserted. 38 // We cannot use fat nops here, since the concurrent code rewrite may transiently 39 // create the illegal instruction sequence. 40 while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeGeneralJump::instruction_size) { 41 _masm->nop(); 42 } 43 patch->install(_masm, patch_code, obj, info); 44 append_code_stub(patch); 45 46 #ifdef ASSERT 47 Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci()); 48 if (patch->id() == PatchingStub::access_field_id) { 49 switch (code) { 50 case Bytecodes::_putstatic: 51 case Bytecodes::_getstatic: 52 case Bytecodes::_putfield: 53 case Bytecodes::_getfield: 54 break; 55 default: 56 ShouldNotReachHere(); 57 } 58 } else if (patch->id() == PatchingStub::load_klass_id) { 59 switch (code) { 60 case Bytecodes::_new: 61 case Bytecodes::_anewarray: 62 case Bytecodes::_multianewarray: 63 case Bytecodes::_instanceof: 64 case Bytecodes::_checkcast: 65 break; 66 default: 67 ShouldNotReachHere(); 68 } 69 } else if (patch->id() == PatchingStub::load_mirror_id) { 70 switch (code) { 71 case Bytecodes::_putstatic: 72 case Bytecodes::_getstatic: 73 case Bytecodes::_ldc: 74 case Bytecodes::_ldc_w: 75 break; 76 default: 77 ShouldNotReachHere(); 78 } 79 } else if (patch->id() == PatchingStub::load_appendix_id) { 80 Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci()); 81 assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution"); 82 } else { 83 ShouldNotReachHere(); 84 } 85 #endif 86 } 87 88 PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) { 89 IRScope* scope = info->scope(); 90 Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci()); 91 if (Bytecodes::has_optional_appendix(bc_raw)) { 92 return PatchingStub::load_appendix_id; 93 } 94 return PatchingStub::load_mirror_id; 95 } 96 97 //--------------------------------------------------------------- 98 99 100 LIR_Assembler::LIR_Assembler(Compilation* c): 101 _compilation(c) 102 , _masm(c->masm()) 103 , _bs(BarrierSet::barrier_set()) 104 , _frame_map(c->frame_map()) 105 , _current_block(NULL) 106 , _pending_non_safepoint(NULL) 107 , _pending_non_safepoint_offset(0) 108 { 109 _slow_case_stubs = new CodeStubList(); 110 } 111 112 113 LIR_Assembler::~LIR_Assembler() { 114 } 115 116 117 void LIR_Assembler::check_codespace() { 118 CodeSection* cs = _masm->code_section(); 119 if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) { 120 BAILOUT("CodeBuffer overflow"); 121 } 122 } 123 124 125 void LIR_Assembler::append_code_stub(CodeStub* stub) { 126 _slow_case_stubs->append(stub); 127 } 128 129 void LIR_Assembler::emit_stubs(CodeStubList* stub_list) { 130 for (int m = 0; m < stub_list->length(); m++) { 131 CodeStub* s = stub_list->at(m); 132 133 check_codespace(); 134 CHECK_BAILOUT(); 135 136 #ifndef PRODUCT 137 if (CommentedAssembly) { 138 stringStream st; 139 s->print_name(&st); 140 st.print(" slow case"); 141 _masm->block_comment(st.as_string()); 142 } 143 #endif 144 s->emit_code(this); 145 #ifdef ASSERT 146 s->assert_no_unbound_labels(); 147 #endif 148 } 149 } 150 151 152 void LIR_Assembler::emit_slow_case_stubs() { 153 emit_stubs(_slow_case_stubs); 154 } 155 156 157 bool LIR_Assembler::needs_icache(ciMethod* method) const { 158 return !method->is_static(); 159 } 160 161 162 int LIR_Assembler::code_offset() const { 163 return _masm->offset(); 164 } 165 166 167 address LIR_Assembler::pc() const { 168 return _masm->pc(); 169 } 170 171 // To bang the stack of this compiled method we use the stack size 172 // that the interpreter would need in case of a deoptimization. This 173 // removes the need to bang the stack in the deoptimization blob which 174 // in turn simplifies stack overflow handling. 175 int LIR_Assembler::bang_size_in_bytes() const { 176 return MAX2(initial_frame_size_in_bytes() + os::extra_bang_size_in_bytes(), _compilation->interpreter_frame_size()); 177 } 178 179 void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) { 180 for (int i = 0; i < info_list->length(); i++) { 181 XHandlers* handlers = info_list->at(i)->exception_handlers(); 182 183 for (int j = 0; j < handlers->length(); j++) { 184 XHandler* handler = handlers->handler_at(j); 185 assert(handler->lir_op_id() != -1, "handler not processed by LinearScan"); 186 assert(handler->entry_code() == NULL || 187 handler->entry_code()->instructions_list()->last()->code() == lir_branch || 188 handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch"); 189 190 if (handler->entry_pco() == -1) { 191 // entry code not emitted yet 192 if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) { 193 handler->set_entry_pco(code_offset()); 194 if (CommentedAssembly) { 195 _masm->block_comment("Exception adapter block"); 196 } 197 emit_lir_list(handler->entry_code()); 198 } else { 199 handler->set_entry_pco(handler->entry_block()->exception_handler_pco()); 200 } 201 202 assert(handler->entry_pco() != -1, "must be set now"); 203 } 204 } 205 } 206 } 207 208 209 void LIR_Assembler::emit_code(BlockList* hir) { 210 if (PrintLIR) { 211 print_LIR(hir); 212 } 213 214 int n = hir->length(); 215 for (int i = 0; i < n; i++) { 216 emit_block(hir->at(i)); 217 CHECK_BAILOUT(); 218 } 219 220 flush_debug_info(code_offset()); 221 222 DEBUG_ONLY(check_no_unbound_labels()); 223 } 224 225 226 void LIR_Assembler::emit_block(BlockBegin* block) { 227 if (block->is_set(BlockBegin::backward_branch_target_flag)) { 228 align_backward_branch_target(); 229 } 230 231 // if this block is the start of an exception handler, record the 232 // PC offset of the first instruction for later construction of 233 // the ExceptionHandlerTable 234 if (block->is_set(BlockBegin::exception_entry_flag)) { 235 block->set_exception_handler_pco(code_offset()); 236 } 237 238 #ifndef PRODUCT 239 if (PrintLIRWithAssembly) { 240 // don't print Phi's 241 InstructionPrinter ip(false); 242 block->print(ip); 243 } 244 #endif /* PRODUCT */ 245 246 assert(block->lir() != NULL, "must have LIR"); 247 X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed")); 248 249 #ifndef PRODUCT 250 if (CommentedAssembly) { 251 stringStream st; 252 st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci()); 253 _masm->block_comment(st.as_string()); 254 } 255 #endif 256 257 emit_lir_list(block->lir()); 258 259 X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed")); 260 } 261 262 263 void LIR_Assembler::emit_lir_list(LIR_List* list) { 264 peephole(list); 265 266 int n = list->length(); 267 for (int i = 0; i < n; i++) { 268 LIR_Op* op = list->at(i); 269 270 check_codespace(); 271 CHECK_BAILOUT(); 272 273 #ifndef PRODUCT 274 if (CommentedAssembly) { 275 // Don't record out every op since that's too verbose. Print 276 // branches since they include block and stub names. Also print 277 // patching moves since they generate funny looking code. 278 if (op->code() == lir_branch || 279 (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none) || 280 (op->code() == lir_leal && op->as_Op1()->patch_code() != lir_patch_none)) { 281 stringStream st; 282 op->print_on(&st); 283 _masm->block_comment(st.as_string()); 284 } 285 } 286 if (PrintLIRWithAssembly) { 287 // print out the LIR operation followed by the resulting assembly 288 list->at(i)->print(); tty->cr(); 289 } 290 #endif /* PRODUCT */ 291 292 op->emit_code(this); 293 294 if (compilation()->debug_info_recorder()->recording_non_safepoints()) { 295 process_debug_info(op); 296 } 297 298 #ifndef PRODUCT 299 if (PrintLIRWithAssembly) { 300 _masm->code()->decode(); 301 } 302 #endif /* PRODUCT */ 303 } 304 } 305 306 #ifdef ASSERT 307 void LIR_Assembler::check_no_unbound_labels() { 308 CHECK_BAILOUT(); 309 310 for (int i = 0; i < _branch_target_blocks.length() - 1; i++) { 311 if (!_branch_target_blocks.at(i)->label()->is_bound()) { 312 tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id()); 313 assert(false, "unbound label"); 314 } 315 } 316 } 317 #endif 318 319 //----------------------------------debug info-------------------------------- 320 321 322 void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) { 323 int pc_offset = code_offset(); 324 flush_debug_info(pc_offset); 325 info->record_debug_info(compilation()->debug_info_recorder(), pc_offset); 326 if (info->exception_handlers() != NULL) { 327 compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers()); 328 } 329 } 330 331 332 void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) { 333 flush_debug_info(pc_offset); 334 cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset); 335 if (cinfo->exception_handlers() != NULL) { 336 compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers()); 337 } 338 } 339 340 static ValueStack* debug_info(Instruction* ins) { 341 StateSplit* ss = ins->as_StateSplit(); 342 if (ss != NULL) return ss->state(); 343 return ins->state_before(); 344 } 345 346 void LIR_Assembler::process_debug_info(LIR_Op* op) { 347 Instruction* src = op->source(); 348 if (src == NULL) return; 349 int pc_offset = code_offset(); 350 if (_pending_non_safepoint == src) { 351 _pending_non_safepoint_offset = pc_offset; 352 return; 353 } 354 ValueStack* vstack = debug_info(src); 355 if (vstack == NULL) return; 356 if (_pending_non_safepoint != NULL) { 357 // Got some old debug info. Get rid of it. 358 if (debug_info(_pending_non_safepoint) == vstack) { 359 _pending_non_safepoint_offset = pc_offset; 360 return; 361 } 362 if (_pending_non_safepoint_offset < pc_offset) { 363 record_non_safepoint_debug_info(); 364 } 365 _pending_non_safepoint = NULL; 366 } 367 // Remember the debug info. 368 if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) { 369 _pending_non_safepoint = src; 370 _pending_non_safepoint_offset = pc_offset; 371 } 372 } 373 374 // Index caller states in s, where 0 is the oldest, 1 its callee, etc. 375 // Return NULL if n is too large. 376 // Returns the caller_bci for the next-younger state, also. 377 static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) { 378 ValueStack* t = s; 379 for (int i = 0; i < n; i++) { 380 if (t == NULL) break; 381 t = t->caller_state(); 382 } 383 if (t == NULL) return NULL; 384 for (;;) { 385 ValueStack* tc = t->caller_state(); 386 if (tc == NULL) return s; 387 t = tc; 388 bci_result = tc->bci(); 389 s = s->caller_state(); 390 } 391 } 392 393 void LIR_Assembler::record_non_safepoint_debug_info() { 394 int pc_offset = _pending_non_safepoint_offset; 395 ValueStack* vstack = debug_info(_pending_non_safepoint); 396 int bci = vstack->bci(); 397 398 DebugInformationRecorder* debug_info = compilation()->debug_info_recorder(); 399 assert(debug_info->recording_non_safepoints(), "sanity"); 400 401 debug_info->add_non_safepoint(pc_offset); 402 403 // Visit scopes from oldest to youngest. 404 for (int n = 0; ; n++) { 405 int s_bci = bci; 406 ValueStack* s = nth_oldest(vstack, n, s_bci); 407 if (s == NULL) break; 408 IRScope* scope = s->scope(); 409 //Always pass false for reexecute since these ScopeDescs are never used for deopt 410 methodHandle null_mh; 411 debug_info->describe_scope(pc_offset, null_mh, scope->method(), s->bci(), false/*reexecute*/); 412 } 413 414 debug_info->end_non_safepoint(pc_offset); 415 } 416 417 418 ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) { 419 return add_debug_info_for_null_check(code_offset(), cinfo); 420 } 421 422 ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) { 423 ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo); 424 append_code_stub(stub); 425 return stub; 426 } 427 428 void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) { 429 add_debug_info_for_div0(code_offset(), info); 430 } 431 432 void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) { 433 DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo); 434 append_code_stub(stub); 435 } 436 437 void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) { 438 rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info()); 439 } 440 441 442 void LIR_Assembler::emit_call(LIR_OpJavaCall* op) { 443 verify_oop_map(op->info()); 444 445 if (os::is_MP()) { 446 // must align calls sites, otherwise they can't be updated atomically on MP hardware 447 align_call(op->code()); 448 } 449 450 // emit the static call stub stuff out of line 451 emit_static_call_stub(); 452 CHECK_BAILOUT(); 453 454 switch (op->code()) { 455 case lir_static_call: 456 case lir_dynamic_call: 457 call(op, relocInfo::static_call_type); 458 break; 459 case lir_optvirtual_call: 460 call(op, relocInfo::opt_virtual_call_type); 461 break; 462 case lir_icvirtual_call: 463 ic_call(op); 464 break; 465 case lir_virtual_call: 466 vtable_call(op); 467 break; 468 default: 469 fatal("unexpected op code: %s", op->name()); 470 break; 471 } 472 473 // JSR 292 474 // Record if this method has MethodHandle invokes. 475 if (op->is_method_handle_invoke()) { 476 compilation()->set_has_method_handle_invokes(true); 477 } 478 479 #if defined(X86) && defined(TIERED) 480 // C2 leave fpu stack dirty clean it 481 if (UseSSE < 2) { 482 int i; 483 for ( i = 1; i <= 7 ; i++ ) { 484 ffree(i); 485 } 486 if (!op->result_opr()->is_float_kind()) { 487 ffree(0); 488 } 489 } 490 #endif // X86 && TIERED 491 } 492 493 494 void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) { 495 _masm->bind (*(op->label())); 496 } 497 498 499 void LIR_Assembler::emit_op1(LIR_Op1* op) { 500 switch (op->code()) { 501 case lir_move: 502 if (op->move_kind() == lir_move_volatile) { 503 assert(op->patch_code() == lir_patch_none, "can't patch volatiles"); 504 volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info()); 505 } else { 506 move_op(op->in_opr(), op->result_opr(), op->type(), 507 op->patch_code(), op->info(), op->pop_fpu_stack(), 508 op->move_kind() == lir_move_unaligned, 509 op->move_kind() == lir_move_wide); 510 } 511 break; 512 513 case lir_roundfp: { 514 LIR_OpRoundFP* round_op = op->as_OpRoundFP(); 515 roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack()); 516 break; 517 } 518 519 case lir_return: 520 return_op(op->in_opr()); 521 break; 522 523 case lir_safepoint: 524 if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) { 525 _masm->nop(); 526 } 527 safepoint_poll(op->in_opr(), op->info()); 528 break; 529 530 case lir_fxch: 531 fxch(op->in_opr()->as_jint()); 532 break; 533 534 case lir_fld: 535 fld(op->in_opr()->as_jint()); 536 break; 537 538 case lir_ffree: 539 ffree(op->in_opr()->as_jint()); 540 break; 541 542 case lir_branch: 543 break; 544 545 case lir_push: 546 push(op->in_opr()); 547 break; 548 549 case lir_pop: 550 pop(op->in_opr()); 551 break; 552 553 case lir_neg: 554 negate(op->in_opr(), op->result_opr()); 555 break; 556 557 case lir_leal: 558 leal(op->in_opr(), op->result_opr(), op->patch_code(), op->info()); 559 break; 560 561 case lir_null_check: { 562 ImplicitNullCheckStub* stub = add_debug_info_for_null_check_here(op->info()); 563 564 if (op->in_opr()->is_single_cpu()) { 565 _masm->null_check(op->in_opr()->as_register(), stub->entry()); 566 } else { 567 Unimplemented(); 568 } 569 break; 570 } 571 572 case lir_monaddr: 573 monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr()); 574 break; 575 576 #ifdef SPARC 577 case lir_pack64: 578 pack64(op->in_opr(), op->result_opr()); 579 break; 580 581 case lir_unpack64: 582 unpack64(op->in_opr(), op->result_opr()); 583 break; 584 #endif 585 586 case lir_unwind: 587 unwind_op(op->in_opr()); 588 break; 589 590 default: 591 Unimplemented(); 592 break; 593 } 594 } 595 596 597 void LIR_Assembler::emit_op0(LIR_Op0* op) { 598 switch (op->code()) { 599 case lir_word_align: { 600 _masm->align(BytesPerWord); 601 break; 602 } 603 604 case lir_nop: 605 assert(op->info() == NULL, "not supported"); 606 _masm->nop(); 607 break; 608 609 case lir_label: 610 Unimplemented(); 611 break; 612 613 case lir_build_frame: 614 build_frame(); 615 break; 616 617 case lir_std_entry: 618 // init offsets 619 offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset()); 620 _masm->align(CodeEntryAlignment); 621 if (needs_icache(compilation()->method())) { 622 check_icache(); 623 } 624 offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset()); 625 _masm->verified_entry(); 626 build_frame(); 627 offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset()); 628 break; 629 630 case lir_osr_entry: 631 offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset()); 632 osr_entry(); 633 break; 634 635 case lir_24bit_FPU: 636 set_24bit_FPU(); 637 break; 638 639 case lir_reset_FPU: 640 reset_FPU(); 641 break; 642 643 case lir_breakpoint: 644 breakpoint(); 645 break; 646 647 case lir_fpop_raw: 648 fpop(); 649 break; 650 651 case lir_membar: 652 membar(); 653 break; 654 655 case lir_membar_acquire: 656 membar_acquire(); 657 break; 658 659 case lir_membar_release: 660 membar_release(); 661 break; 662 663 case lir_membar_loadload: 664 membar_loadload(); 665 break; 666 667 case lir_membar_storestore: 668 membar_storestore(); 669 break; 670 671 case lir_membar_loadstore: 672 membar_loadstore(); 673 break; 674 675 case lir_membar_storeload: 676 membar_storeload(); 677 break; 678 679 case lir_get_thread: 680 get_thread(op->result_opr()); 681 break; 682 683 case lir_on_spin_wait: 684 on_spin_wait(); 685 break; 686 687 case lir_getfp: 688 getfp(op->result_opr()); 689 break; 690 691 case lir_getsp: 692 getsp(op->result_opr()); 693 break; 694 695 696 default: 697 ShouldNotReachHere(); 698 break; 699 } 700 } 701 702 703 void LIR_Assembler::emit_op2(LIR_Op2* op) { 704 switch (op->code()) { 705 case lir_cmp: 706 if (op->info() != NULL) { 707 assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(), 708 "shouldn't be codeemitinfo for non-address operands"); 709 add_debug_info_for_null_check_here(op->info()); // exception possible 710 } 711 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op); 712 break; 713 714 case lir_cmp_l2i: 715 case lir_cmp_fd2i: 716 case lir_ucmp_fd2i: 717 comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op); 718 break; 719 720 case lir_cmove: 721 cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type()); 722 break; 723 724 case lir_shl: 725 case lir_shr: 726 case lir_ushr: 727 if (op->in_opr2()->is_constant()) { 728 shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr()); 729 } else { 730 shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr()); 731 } 732 break; 733 734 case lir_add: 735 case lir_sub: 736 case lir_mul: 737 case lir_mul_strictfp: 738 case lir_div: 739 case lir_div_strictfp: 740 case lir_rem: 741 assert(op->fpu_pop_count() < 2, ""); 742 arith_op( 743 op->code(), 744 op->in_opr1(), 745 op->in_opr2(), 746 op->result_opr(), 747 op->info(), 748 op->fpu_pop_count() == 1); 749 break; 750 751 case lir_abs: 752 case lir_sqrt: 753 case lir_tan: 754 case lir_log10: 755 intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op); 756 break; 757 758 case lir_logic_and: 759 case lir_logic_or: 760 case lir_logic_xor: 761 logic_op( 762 op->code(), 763 op->in_opr1(), 764 op->in_opr2(), 765 op->result_opr()); 766 break; 767 768 case lir_throw: 769 throw_op(op->in_opr1(), op->in_opr2(), op->info()); 770 break; 771 772 case lir_xadd: 773 case lir_xchg: 774 atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr()); 775 break; 776 777 default: 778 Unimplemented(); 779 break; 780 } 781 } 782 783 784 void LIR_Assembler::build_frame() { 785 _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes()); 786 } 787 788 789 void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) { 790 assert((src->is_single_fpu() && dest->is_single_stack()) || 791 (src->is_double_fpu() && dest->is_double_stack()), 792 "round_fp: rounds register -> stack location"); 793 794 reg2stack (src, dest, src->type(), pop_fpu_stack); 795 } 796 797 798 void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) { 799 if (src->is_register()) { 800 if (dest->is_register()) { 801 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here"); 802 reg2reg(src, dest); 803 } else if (dest->is_stack()) { 804 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here"); 805 reg2stack(src, dest, type, pop_fpu_stack); 806 } else if (dest->is_address()) { 807 reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned); 808 } else { 809 ShouldNotReachHere(); 810 } 811 812 } else if (src->is_stack()) { 813 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here"); 814 if (dest->is_register()) { 815 stack2reg(src, dest, type); 816 } else if (dest->is_stack()) { 817 stack2stack(src, dest, type); 818 } else { 819 ShouldNotReachHere(); 820 } 821 822 } else if (src->is_constant()) { 823 if (dest->is_register()) { 824 const2reg(src, dest, patch_code, info); // patching is possible 825 } else if (dest->is_stack()) { 826 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here"); 827 const2stack(src, dest); 828 } else if (dest->is_address()) { 829 assert(patch_code == lir_patch_none, "no patching allowed here"); 830 const2mem(src, dest, type, info, wide); 831 } else { 832 ShouldNotReachHere(); 833 } 834 835 } else if (src->is_address()) { 836 mem2reg(src, dest, type, patch_code, info, wide, unaligned); 837 838 } else { 839 ShouldNotReachHere(); 840 } 841 } 842 843 844 void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) { 845 #ifndef PRODUCT 846 if (VerifyOops) { 847 OopMapStream s(info->oop_map()); 848 while (!s.is_done()) { 849 OopMapValue v = s.current(); 850 if (v.is_oop()) { 851 VMReg r = v.reg(); 852 if (!r->is_stack()) { 853 stringStream st; 854 st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset()); 855 #ifdef SPARC 856 _masm->_verify_oop(r->as_Register(), os::strdup(st.as_string(), mtCompiler), __FILE__, __LINE__); 857 #else 858 _masm->verify_oop(r->as_Register()); 859 #endif 860 } else { 861 _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size); 862 } 863 } 864 check_codespace(); 865 CHECK_BAILOUT(); 866 867 s.next(); 868 } 869 } 870 #endif 871 }