/*
 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/barrierSet.hpp"
#include "runtime/os.hpp"

void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // We must have enough patching space so that the call can be inserted.
  // We cannot use fat nops here, since the concurrent code rewrite may transiently
  // create an illegal instruction sequence.
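  // Note: padding one nop at a time (rather than emitting a single wide nop)
  // keeps every intermediate state of this region a valid instruction
  // sequence; the loop guarantees the patch site spans at least
  // NativeGeneralJump::instruction_size bytes, enough for the jump that is
  // later written over it when the site is patched.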
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeGeneralJump::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_defaultvalue:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}

PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
  IRScope* scope = info->scope();
  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
  if (Bytecodes::has_optional_appendix(bc_raw)) {
    return PatchingStub::load_appendix_id;
  }
  return PatchingStub::load_mirror_id;
}

//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _masm(c->masm())
 , _bs(BarrierSet::barrier_set())
 , _compilation(c)
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
  // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
  // Reset it here to avoid an assertion.
  _unwind_handler_entry.reset();
}

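// Bail out before the CodeBuffer can actually overflow: a slack of
// 1K (32-bit) / 2K (64-bit) is kept so that the code emitted for a single
// LIR op or stub between two checks should not run past the section end.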
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = stub_list->at(m);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob which
// in turn simplifies stack overflow handling.
int LIR_Assembler::bang_size_in_bytes() const {
  return MAX2(initial_frame_size_in_bytes() + os::extra_bang_size_in_bytes(), _compilation->interpreter_frame_size());
}

void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
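    // This offset doubles as the default entry pco: emit_exception_entries()
    // above falls back to it for handlers that need no separate adapter code.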
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}


void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print out every op since that's too verbose. Print
      // branches since they include block and stub names. Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none) ||
          (op->code() == lir_leal && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}

void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL) return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL) return;
  if (_pending_non_safepoint != NULL) {
    // There is pending debug info from an earlier op: extend it if it
    // describes the same state, otherwise record it and start over.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}
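
// nth_oldest() below walks two cursors down the caller-state chain in
// lockstep, kept n frames apart; when the leading cursor reaches the oldest
// state, the trailing one is the nth-oldest, so a single pass suffices
// without first measuring the depth of the chain.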
// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Return NULL if n is too large.
// Returns the caller_bci for the next-younger state, also.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL) break;
    t = t->caller_state();
  }
  if (t == NULL) return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL) return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}

void LIR_Assembler::record_non_safepoint_debug_info() {
  int pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack = debug_info(_pending_non_safepoint);
  int bci = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL) break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    methodHandle null_mh;
    debug_info->describe_scope(pc_offset, null_mh, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  return add_debug_info_for_null_check(code_offset(), cinfo);
}

ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  append_code_stub(stub);
  return stub;
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}


void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  // must align call sites, otherwise they can't be updated atomically
  align_call(op->code());

  // emit the static call stub out of line
  emit_static_call_stub();
  CHECK_BAILOUT();

  switch (op->code()) {
    case lir_static_call:
    case lir_dynamic_call:
      call(op, relocInfo::static_call_type);
      break;
    case lir_optvirtual_call:
      call(op, relocInfo::opt_virtual_call_type);
      break;
    case lir_icvirtual_call:
      ic_call(op);
      break;
    case lir_virtual_call:
      vtable_call(op);
      break;
    default:
      fatal("unexpected op code: %s", op->name());
      break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }
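
  // Note for the cleanup below: with tiered compilation on 32-bit x86, a
  // C2-compiled callee may return with x87 stack slots still occupied, so
  // st(1)..st(7) are freed, and st(0) as well unless it holds the call's
  // float/double result.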
#if defined(X86) && defined(TIERED)
  // C2 may leave the FPU stack dirty; clean it up.
  if (UseSSE < 2) {
    for (int i = 1; i <= 7; i++) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind(*(op->label()));
}


void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr(), op->patch_code(), op->info());
      break;

    case lir_null_check: {
      ImplicitNullCheckStub* stub = add_debug_info_for_null_check_here(op->info());

      if (op->in_opr()->is_single_cpu()) {
        _masm->null_check(op->in_opr()->as_register(), stub->entry());
      } else {
        Unimplemented();
      }
      break;
    }

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      _masm->align(BytesPerWord);
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
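      // The inline-cache check above is reached through the unverified
      // entry; past this point the receiver class has been verified, so
      // record the verified entry point before laying down the frame.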
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    case lir_on_spin_wait:
      on_spin_wait();
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_tan:
    case lir_log10:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_neg:
      negate(op->in_opr1(), op->result_opr(), op->in_opr2());
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
}

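
// lir_roundfp is an x87 artifact: an 80-bit extended-precision value is
// rounded to its declared 32/64-bit precision by storing it into a stack
// slot, from which the consumer reloads it.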
void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack(src, dest, src->type(), pop_fpu_stack);
}


void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), os::strdup(st.as_string(), mtCompiler), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}