/*
 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/barrierSet.hpp"
#include "runtime/os.hpp"

void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // We must have enough patching space so that the call can be inserted.
  // We cannot use fat nops here, since the concurrent code rewrite may transiently
  // create an illegal instruction sequence.
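  // (A thread may be executing this code while it is being rewritten; padding
  // with single-byte nops keeps every intermediate byte sequence decodable,
  // whereas a partially overwritten multi-byte nop would not be.)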
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeGeneralJump::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}

PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
  IRScope* scope = info->scope();
  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
  if (Bytecodes::has_optional_appendix(bc_raw)) {
    return PatchingStub::load_appendix_id;
  }
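  // (An appendix shows up at invokedynamic and MethodHandle-intrinsic call
  // sites; other patchable constants resolve to the class mirror instead.)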
  return PatchingStub::load_mirror_id;
}

//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _masm(c->masm())
 , _bs(BarrierSet::barrier_set())
 , _compilation(c)
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
  // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
  // Reset it here to avoid an assertion.
  _unwind_handler_entry.reset();
}


void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = stub_list->at(m);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob which
// in turn simplifies stack overflow handling.
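// (For example, a method with a small compiled frame but a large interpreter
// frame bangs the larger interpreter size up front, so a later deoptimization
// can never be the first to touch an untouched guard page.)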
int LIR_Assembler::bang_size_in_bytes() const {
  return MAX2(initial_frame_size_in_bytes() + os::extra_bang_size_in_bytes(), _compilation->interpreter_frame_size());
}

void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}


void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print out every op since that's too verbose. Print
      // branches since they include block and stub names. Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none) ||
          (op->code() == lir_leal && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}
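
// Non-safepoint debug info is recorded lazily: while successive LIR ops map
// back to the same source instruction (or to states with identical debug
// info), only the pending pc offset is advanced; the pending entry is flushed
// to the DebugInformationRecorder once an op with different info is seen.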
static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}

void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL) return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL) return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info. Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Returns NULL if n is too large.
// Also returns the caller_bci for the next-younger state, via bci_result.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL) break;
    t = t->caller_state();
  }
  if (t == NULL) return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL) return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}

void LIR_Assembler::record_non_safepoint_debug_info() {
  int pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack = debug_info(_pending_non_safepoint);
  int bci = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL) break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    methodHandle null_mh;
    debug_info->describe_scope(pc_offset, null_mh, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  return add_debug_info_for_null_check(code_offset(), cinfo);
}
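
// An implicit null check has no explicit compare: the memory access at the
// recorded pc simply faults when the base object is NULL, and the signal
// handler uses that pc to dispatch to the stub below, which enters the
// runtime to throw the NullPointerException.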
ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  append_code_stub(stub);
  return stub;
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}


void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  // must align call sites, otherwise they can't be updated atomically
  align_call(op->code());
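  // (The call's destination is later rewritten in place while other threads
  // may be executing it; alignment ensures the displacement can be updated
  // with a single atomic store.)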

  // emit the static call stub stuff out of line
  emit_static_call_stub();
  CHECK_BAILOUT();

  switch (op->code()) {
  case lir_static_call:
  case lir_dynamic_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default:
    fatal("unexpected op code: %s", op->name());
    break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it.
  if (UseSSE < 2) {
    int i;
    for ( i = 1; i <= 7 ; i++ ) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind (*(op->label()));
}


void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr(), op->patch_code(), op->info());
      break;

    case lir_null_check: {
      ImplicitNullCheckStub* stub = add_debug_info_for_null_check_here(op->info());

      if (op->in_opr()->is_single_cpu()) {
        _masm->null_check(op->in_opr()->as_register(), stub->entry());
      } else {
        Unimplemented();
      }
      break;
    }

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      _masm->align(BytesPerWord);
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    case lir_on_spin_wait:
      on_spin_wait();
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_tan:
    case lir_log10:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_neg:
      negate(op->in_opr1(), op->result_opr(), op->in_opr2());
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
}

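
// On x86 with the x87 FPU, register values carry 80-bit precision; the only
// way to round one to its declared float/double precision is to store it to
// memory, which is why roundfp always targets a stack slot.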
void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack (src, dest, src->type(), pop_fpu_stack);
}


void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), os::strdup(st.as_string(), mtCompiler), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}