/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciValueKlass.hpp"
#include "gc/shared/barrierSet.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"

void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // We must have enough patching space so that the call can be inserted.
  // We cannot use fat nops here, since the concurrent code rewrite may transiently
  // create the illegal instruction sequence.
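  // Instead we pad with single-byte nops; e.g. (illustrative sizes only) if
  // pc() is 3 bytes past pc_start() and NativeGeneralJump::instruction_size
  // is 5, the loop below emits 2 nops before the stub is installed.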
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeGeneralJump::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_defaultvalue:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}

PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
  IRScope* scope = info->scope();
  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
  if (Bytecodes::has_optional_appendix(bc_raw)) {
    return PatchingStub::load_appendix_id;
  }
  return PatchingStub::load_mirror_id;
}

//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _masm(c->masm())
 , _bs(BarrierSet::barrier_set())
 , _compilation(c)
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
  // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
  // Reset it here to avoid an assertion.
  _unwind_handler_entry.reset();
  _verified_value_entry.reset();
}
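
// Bail out if the CodeBuffer is nearly full. The 1K (32-bit) / 2K (64-bit)
// headroom is a safety margin for code emitted between checks.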
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = stub_list->at(m);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob which
// in turn simplifies stack overflow handling.
int LIR_Assembler::bang_size_in_bytes() const {
  return MAX2(initial_frame_size_in_bytes() + os::extra_bang_size_in_bytes(), _compilation->interpreter_frame_size());
}
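
// Illustrative example for bang_size_in_bytes (made-up numbers): with a
// 128-byte compiled frame, 16 extra bang bytes from the OS and a 512-byte
// interpreter frame, MAX2(128 + 16, 512) = 512 bytes are banged, so the
// frame of a future deoptimization is covered as well.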
void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}


void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print out every op since that's too verbose. Print
      // branches since they include block and stub names. Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none) ||
          (op->code() == lir_leal && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}

void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL) return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL) return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info. Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Return NULL if n is too large.
// Returns the caller_bci for the next-younger state, also.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL) break;
    t = t->caller_state();
  }
  if (t == NULL) return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL) return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}
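
// Walkthrough (hypothetical inline chain a() -> b() -> c(), with s being
// c()'s state, so the caller chain is c -> b -> a): n == 0 yields a()'s
// state (the oldest), n == 1 yields b()'s, n == 2 yields c()'s, and n >= 3
// yields NULL.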
void LIR_Assembler::record_non_safepoint_debug_info() {
  int pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack = debug_info(_pending_non_safepoint);
  int bci = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL) break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    methodHandle null_mh;
    debug_info->describe_scope(pc_offset, null_mh, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  return add_debug_info_for_null_check(code_offset(), cinfo);
}

ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  append_code_stub(stub);
  return stub;
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}
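
// Emits a Java call. The call site is aligned first (see align_call) so
// that the target address embedded in the instruction can later be patched
// atomically while other threads may be executing the code.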
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  // must align call sites, otherwise they can't be updated atomically
  align_call(op->code());

  // emit the static call stub stuff out of line
  emit_static_call_stub();
  CHECK_BAILOUT();

  switch (op->code()) {
  case lir_static_call:
  case lir_dynamic_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default:
    fatal("unexpected op code: %s", op->name());
    break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

  ciMethod* method = op->method();
  if (ValueTypeReturnedAsFields && method->signature()->returns_never_null()) {
    ciType* return_type = method->return_type();
    if (return_type->is_valuetype()) {
      ciValueKlass* vk = return_type->as_value_klass();
      if (vk->can_be_returned_as_fields()) {
        store_value_type_fields_to_buf(vk);
      }
    }
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it up.
  if (UseSSE < 2) {
    int i;
    for ( i = 1; i <= 7 ; i++ ) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind (*(op->label()));
}


void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr(), op->patch_code(), op->info());
      break;

    case lir_null_check: {
      ImplicitNullCheckStub* stub = add_debug_info_for_null_check_here(op->info());

      if (op->in_opr()->is_single_cpu()) {
        _masm->null_check(op->in_opr()->as_register(), stub->entry());
      } else {
        Unimplemented();
      }
      break;
    }

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}
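
// Method entry emission. With scalarized value-type arguments a method can
// have up to three verified entry points: Verified_Entry (value arguments
// passed as scalarized fields), Verified_Value_Entry (all value arguments
// passed as oops) and Verified_Value_Entry_RO (roughly: the receiver passed
// as an oop, other value arguments scalarized). Entries that would emit
// identical code are aliased to one offset; see emit_std_entries below.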
void LIR_Assembler::add_std_entry_info(int pc_offset, bool no_receiver) {
  // FIXME: build different oopmaps/stack/locals according to no_receiver.
  flush_debug_info(pc_offset);
  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  OopMap* oop_map = new OopMap(0, 0); // FIXME
  debug_info->add_safepoint(pc_offset, oop_map);
  DebugToken* locvals = debug_info->create_scope_values(NULL); // FIXME is this needed (for Java debugging to work properly??)
  DebugToken* expvals = debug_info->create_scope_values(NULL); // FIXME is this needed (for Java debugging to work properly??)
  DebugToken* monvals = debug_info->create_monitor_values(NULL); // FIXME: need testing with synchronized method
  bool reexecute = false;
  bool return_oop = false; // This flag will be ignored since it is used only for C2 with escape analysis.
  bool rethrow_exception = false;
  bool is_method_handle_invoke = false;
  debug_info->describe_scope(pc_offset, methodHandle(), method(), 0, reexecute, rethrow_exception, is_method_handle_invoke, return_oop, false, locvals, expvals, monvals);
  debug_info->end_safepoint(pc_offset);
}

void LIR_Assembler::emit_std_entries() {
  offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());

  const CompiledEntrySignature* ces = compilation()->compiled_entry_signature();
  if (ces->has_scalarized_args()) {
    assert(ValueTypePassFieldsAsArgs && method()->get_Method()->has_scalarized_args(), "must be");
    add_std_entry_info(emit_std_entry(CodeOffsets::Verified_Entry, ces), false);

    bool has_value_ro_entry = false;
    if (ces->has_value_recv() && ces->num_value_args() > 1) {
      // We need a separate entry for value_ro
      has_value_ro_entry = true;
      add_std_entry_info(emit_std_entry(CodeOffsets::Verified_Value_Entry_RO, ces), true);
    }
    emit_std_entry(CodeOffsets::Verified_Value_Entry, NULL);
    if (!has_value_ro_entry) {
      if (ces->has_value_recv()) {
        assert(ces->num_value_args() == 1, "must be");
        offsets()->set_value(CodeOffsets::Verified_Value_Entry_RO,
                             offsets()->value(CodeOffsets::Verified_Value_Entry));
      } else {
        assert(ces->num_value_args() > 0, "must be");
        offsets()->set_value(CodeOffsets::Verified_Value_Entry_RO,
                             offsets()->value(CodeOffsets::Verified_Entry));
      }
    }
  } else {
    // All 3 entries are the same (no value-type packing)
    int offset = emit_std_entry(CodeOffsets::Verified_Value_Entry, NULL);
    offsets()->set_value(CodeOffsets::Verified_Entry, offset);
    offsets()->set_value(CodeOffsets::Verified_Value_Entry_RO, offset);
  }
}

int LIR_Assembler::emit_std_entry(CodeOffsets::Entries entry, const CompiledEntrySignature* ces) {
  _masm->align(CodeEntryAlignment);
  if (needs_icache(compilation()->method())) {
    check_icache();
  }
  offsets()->set_value(entry, _masm->offset());
  switch (entry) {
  case CodeOffsets::Verified_Entry:
    return _masm->verified_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), _verified_value_entry);
  case CodeOffsets::Verified_Value_Entry_RO:
    return _masm->verified_value_ro_entry(ces, initial_frame_size_in_bytes(), bang_size_in_bytes(), _verified_value_entry);
  default:
    {
      int offset = _masm->offset();
      assert(entry == CodeOffsets::Verified_Value_Entry, "must be");
      _masm->verified_value_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      return offset;
    }
  }
}
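
// Entry aliasing summary (follows from emit_std_entries above):
//  - no scalarized args:                    all three entries share one offset
//  - value receiver is the only value arg:  Verified_Value_Entry_RO == Verified_Value_Entry
//  - no value receiver:                     Verified_Value_Entry_RO == Verified_Entry
//  - value receiver plus other value args:  three separate entries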
void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      _masm->align(BytesPerWord);
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      emit_std_entries();
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    case lir_on_spin_wait:
      on_spin_wait();
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_tan:
    case lir_log10:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_neg:
      negate(op->in_opr1(), op->result_opr(), op->in_opr2());
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}
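
// Build the method's frame. When value-type arguments are scalarized,
// different entry points can be reached with different incoming stack
// layouts, so the frame may need "stack repair" on return; the exact
// condition is computed by the CompiledEntrySignature (c1_needs_stack_repair).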
void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes(),
                     compilation()->compiled_entry_signature()->c1_needs_stack_repair(),
                     &_verified_value_entry);
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack(src, dest, src->type(), pop_fpu_stack);
}


void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), os::strdup(st.as_string(), mtCompiler), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}