1 /* 2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 # include "incls/_precompiled.incl" 26 # include "incls/_c1_LIRAssembler.cpp.incl" 27 28 29 void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) { 30 // we must have enough patching space so that call can be inserted 31 while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) { 32 _masm->nop(); 33 } 34 patch->install(_masm, patch_code, obj, info); 35 append_patching_stub(patch); 36 37 #ifdef ASSERT 38 Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci()); 39 if (patch->id() == PatchingStub::access_field_id) { 40 switch (code) { 41 case Bytecodes::_putstatic: 42 case Bytecodes::_getstatic: 43 case Bytecodes::_putfield: 44 case Bytecodes::_getfield: 45 break; 46 default: 47 ShouldNotReachHere(); 48 } 49 } else if (patch->id() == PatchingStub::load_klass_id) { 50 switch (code) { 51 case Bytecodes::_putstatic: 52 case Bytecodes::_getstatic: 53 case Bytecodes::_new: 54 case Bytecodes::_anewarray: 55 case Bytecodes::_multianewarray: 56 case Bytecodes::_instanceof: 57 case Bytecodes::_checkcast: 58 case Bytecodes::_ldc: 59 case Bytecodes::_ldc_w: 60 break; 61 default: 62 ShouldNotReachHere(); 63 } 64 } else { 65 ShouldNotReachHere(); 66 } 67 #endif 68 } 69 70 71 //--------------------------------------------------------------- 72 73 74 LIR_Assembler::LIR_Assembler(Compilation* c): 75 _compilation(c) 76 , _masm(c->masm()) 77 , _bs(Universe::heap()->barrier_set()) 78 , _frame_map(c->frame_map()) 79 , _current_block(NULL) 80 , _pending_non_safepoint(NULL) 81 , _pending_non_safepoint_offset(0) 82 { 83 _slow_case_stubs = new CodeStubList(); 84 } 85 86 87 LIR_Assembler::~LIR_Assembler() { 88 } 89 90 91 void LIR_Assembler::append_patching_stub(PatchingStub* stub) { 92 _slow_case_stubs->append(stub); 93 } 94 95 96 void LIR_Assembler::check_codespace() { 97 CodeSection* cs = _masm->code_section(); 98 if (cs->remaining() < (int)(1*K)) { 99 BAILOUT("CodeBuffer overflow"); 100 } 101 } 102 103 104 void LIR_Assembler::emit_code_stub(CodeStub* stub) { 105 _slow_case_stubs->append(stub); 106 } 107 108 void LIR_Assembler::emit_stubs(CodeStubList* stub_list) { 109 for (int m = 0; m < stub_list->length(); m++) { 110 CodeStub* s = (*stub_list)[m]; 111 112 check_codespace(); 113 CHECK_BAILOUT(); 114 115 #ifndef PRODUCT 116 if (CommentedAssembly) { 117 stringStream st; 118 s->print_name(&st); 119 st.print(" slow case"); 120 _masm->block_comment(st.as_string()); 121 } 122 #endif 123 s->emit_code(this); 124 #ifdef ASSERT 125 s->assert_no_unbound_labels(); 126 #endif 127 } 128 } 129 130 131 void LIR_Assembler::emit_slow_case_stubs() { 132 emit_stubs(_slow_case_stubs); 133 } 134 135 136 bool LIR_Assembler::needs_icache(ciMethod* method) const { 137 return !method->is_static(); 138 } 139 140 141 int LIR_Assembler::code_offset() const { 142 return _masm->offset(); 143 } 144 145 146 address LIR_Assembler::pc() const { 147 return _masm->pc(); 148 } 149 150 151 void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) { 152 for (int i = 0; i < info_list->length(); i++) { 153 XHandlers* handlers = info_list->at(i)->exception_handlers(); 154 155 for (int j = 0; j < handlers->length(); j++) { 156 XHandler* handler = handlers->handler_at(j); 157 assert(handler->lir_op_id() != -1, "handler not processed by LinearScan"); 158 assert(handler->entry_code() == NULL || 159 handler->entry_code()->instructions_list()->last()->code() == lir_branch || 160 handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch"); 161 162 if (handler->entry_pco() == -1) { 163 // entry code not emitted yet 164 if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) { 165 handler->set_entry_pco(code_offset()); 166 if (CommentedAssembly) { 167 _masm->block_comment("Exception adapter block"); 168 } 169 emit_lir_list(handler->entry_code()); 170 } else { 171 handler->set_entry_pco(handler->entry_block()->exception_handler_pco()); 172 } 173 174 assert(handler->entry_pco() != -1, "must be set now"); 175 } 176 } 177 } 178 } 179 180 181 void LIR_Assembler::emit_code(BlockList* hir) { 182 if (PrintLIR) { 183 print_LIR(hir); 184 } 185 186 int n = hir->length(); 187 for (int i = 0; i < n; i++) { 188 emit_block(hir->at(i)); 189 CHECK_BAILOUT(); 190 } 191 192 flush_debug_info(code_offset()); 193 194 DEBUG_ONLY(check_no_unbound_labels()); 195 } 196 197 198 void LIR_Assembler::emit_block(BlockBegin* block) { 199 if (block->is_set(BlockBegin::backward_branch_target_flag)) { 200 align_backward_branch_target(); 201 } 202 203 // if this block is the start of an exception handler, record the 204 // PC offset of the first instruction for later construction of 205 // the ExceptionHandlerTable 206 if (block->is_set(BlockBegin::exception_entry_flag)) { 207 block->set_exception_handler_pco(code_offset()); 208 } 209 210 #ifndef PRODUCT 211 if (PrintLIRWithAssembly) { 212 // don't print Phi's 213 InstructionPrinter ip(false); 214 block->print(ip); 215 } 216 #endif /* PRODUCT */ 217 218 assert(block->lir() != NULL, "must have LIR"); 219 X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed")); 220 221 #ifndef PRODUCT 222 if (CommentedAssembly) { 223 stringStream st; 224 st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci()); 225 _masm->block_comment(st.as_string()); 226 } 227 #endif 228 229 emit_lir_list(block->lir()); 230 231 X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed")); 232 } 233 234 235 void LIR_Assembler::emit_lir_list(LIR_List* list) { 236 peephole(list); 237 238 int n = list->length(); 239 for (int i = 0; i < n; i++) { 240 LIR_Op* op = list->at(i); 241 242 check_codespace(); 243 CHECK_BAILOUT(); 244 245 #ifndef PRODUCT 246 if (CommentedAssembly) { 247 // Don't record out every op since that's too verbose. Print 248 // branches since they include block and stub names. Also print 249 // patching moves since they generate funny looking code. 250 if (op->code() == lir_branch || 251 (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) { 252 stringStream st; 253 op->print_on(&st); 254 _masm->block_comment(st.as_string()); 255 } 256 } 257 if (PrintLIRWithAssembly) { 258 // print out the LIR operation followed by the resulting assembly 259 list->at(i)->print(); tty->cr(); 260 } 261 #endif /* PRODUCT */ 262 263 op->emit_code(this); 264 265 if (compilation()->debug_info_recorder()->recording_non_safepoints()) { 266 process_debug_info(op); 267 } 268 269 #ifndef PRODUCT 270 if (PrintLIRWithAssembly) { 271 _masm->code()->decode(); 272 } 273 #endif /* PRODUCT */ 274 } 275 } 276 277 #ifdef ASSERT 278 void LIR_Assembler::check_no_unbound_labels() { 279 CHECK_BAILOUT(); 280 281 for (int i = 0; i < _branch_target_blocks.length() - 1; i++) { 282 if (!_branch_target_blocks.at(i)->label()->is_bound()) { 283 tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id()); 284 assert(false, "unbound label"); 285 } 286 } 287 } 288 #endif 289 290 //----------------------------------debug info-------------------------------- 291 292 293 void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) { 294 _masm->code_section()->relocate(pc(), relocInfo::poll_type); 295 int pc_offset = code_offset(); 296 flush_debug_info(pc_offset); 297 info->record_debug_info(compilation()->debug_info_recorder(), pc_offset); 298 if (info->exception_handlers() != NULL) { 299 compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers()); 300 } 301 } 302 303 304 void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) { 305 flush_debug_info(pc_offset); 306 cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset); 307 if (cinfo->exception_handlers() != NULL) { 308 compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers()); 309 } 310 } 311 312 static ValueStack* debug_info(Instruction* ins) { 313 StateSplit* ss = ins->as_StateSplit(); 314 if (ss != NULL) return ss->state(); 315 return ins->state_before(); 316 } 317 318 void LIR_Assembler::process_debug_info(LIR_Op* op) { 319 Instruction* src = op->source(); 320 if (src == NULL) return; 321 int pc_offset = code_offset(); 322 if (_pending_non_safepoint == src) { 323 _pending_non_safepoint_offset = pc_offset; 324 return; 325 } 326 ValueStack* vstack = debug_info(src); 327 if (vstack == NULL) return; 328 if (_pending_non_safepoint != NULL) { 329 // Got some old debug info. Get rid of it. 330 if (debug_info(_pending_non_safepoint) == vstack) { 331 _pending_non_safepoint_offset = pc_offset; 332 return; 333 } 334 if (_pending_non_safepoint_offset < pc_offset) { 335 record_non_safepoint_debug_info(); 336 } 337 _pending_non_safepoint = NULL; 338 } 339 // Remember the debug info. 340 if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) { 341 _pending_non_safepoint = src; 342 _pending_non_safepoint_offset = pc_offset; 343 } 344 } 345 346 // Index caller states in s, where 0 is the oldest, 1 its callee, etc. 347 // Return NULL if n is too large. 348 // Returns the caller_bci for the next-younger state, also. 349 static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) { 350 ValueStack* t = s; 351 for (int i = 0; i < n; i++) { 352 if (t == NULL) break; 353 t = t->caller_state(); 354 } 355 if (t == NULL) return NULL; 356 for (;;) { 357 ValueStack* tc = t->caller_state(); 358 if (tc == NULL) return s; 359 t = tc; 360 bci_result = tc->bci(); 361 s = s->caller_state(); 362 } 363 } 364 365 void LIR_Assembler::record_non_safepoint_debug_info() { 366 int pc_offset = _pending_non_safepoint_offset; 367 ValueStack* vstack = debug_info(_pending_non_safepoint); 368 int bci = vstack->bci(); 369 370 DebugInformationRecorder* debug_info = compilation()->debug_info_recorder(); 371 assert(debug_info->recording_non_safepoints(), "sanity"); 372 373 debug_info->add_non_safepoint(pc_offset); 374 375 // Visit scopes from oldest to youngest. 376 for (int n = 0; ; n++) { 377 int s_bci = bci; 378 ValueStack* s = nth_oldest(vstack, n, s_bci); 379 if (s == NULL) break; 380 IRScope* scope = s->scope(); 381 //Always pass false for reexecute since these ScopeDescs are never used for deopt 382 debug_info->describe_scope(pc_offset, scope->method(), s->bci(), false/*reexecute*/); 383 } 384 385 debug_info->end_non_safepoint(pc_offset); 386 } 387 388 389 void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) { 390 add_debug_info_for_null_check(code_offset(), cinfo); 391 } 392 393 void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) { 394 ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo); 395 emit_code_stub(stub); 396 } 397 398 void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) { 399 add_debug_info_for_div0(code_offset(), info); 400 } 401 402 void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) { 403 DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo); 404 emit_code_stub(stub); 405 } 406 407 void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) { 408 rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info()); 409 } 410 411 412 void LIR_Assembler::emit_call(LIR_OpJavaCall* op) { 413 verify_oop_map(op->info()); 414 415 if (os::is_MP()) { 416 // must align calls sites, otherwise they can't be updated atomically on MP hardware 417 align_call(op->code()); 418 } 419 420 // emit the static call stub stuff out of line 421 emit_static_call_stub(); 422 423 switch (op->code()) { 424 case lir_static_call: 425 call(op, relocInfo::static_call_type); 426 break; 427 case lir_optvirtual_call: 428 case lir_dynamic_call: 429 call(op, relocInfo::opt_virtual_call_type); 430 break; 431 case lir_icvirtual_call: 432 ic_call(op); 433 break; 434 case lir_virtual_call: 435 vtable_call(op); 436 break; 437 default: ShouldNotReachHere(); 438 } 439 440 // JSR 292 441 // Record if this method has MethodHandle invokes. 442 if (op->is_method_handle_invoke()) { 443 compilation()->set_has_method_handle_invokes(true); 444 } 445 446 #if defined(X86) && defined(TIERED) 447 // C2 leave fpu stack dirty clean it 448 if (UseSSE < 2) { 449 int i; 450 for ( i = 1; i <= 7 ; i++ ) { 451 ffree(i); 452 } 453 if (!op->result_opr()->is_float_kind()) { 454 ffree(0); 455 } 456 } 457 #endif // X86 && TIERED 458 } 459 460 461 void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) { 462 _masm->bind (*(op->label())); 463 } 464 465 466 void LIR_Assembler::emit_op1(LIR_Op1* op) { 467 switch (op->code()) { 468 case lir_move: 469 if (op->move_kind() == lir_move_volatile) { 470 assert(op->patch_code() == lir_patch_none, "can't patch volatiles"); 471 volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info()); 472 } else { 473 move_op(op->in_opr(), op->result_opr(), op->type(), 474 op->patch_code(), op->info(), op->pop_fpu_stack(), 475 op->move_kind() == lir_move_unaligned, 476 op->move_kind() == lir_move_wide); 477 } 478 break; 479 480 case lir_prefetchr: 481 prefetchr(op->in_opr()); 482 break; 483 484 case lir_prefetchw: 485 prefetchw(op->in_opr()); 486 break; 487 488 case lir_roundfp: { 489 LIR_OpRoundFP* round_op = op->as_OpRoundFP(); 490 roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack()); 491 break; 492 } 493 494 case lir_return: 495 return_op(op->in_opr()); 496 break; 497 498 case lir_safepoint: 499 if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) { 500 _masm->nop(); 501 } 502 safepoint_poll(op->in_opr(), op->info()); 503 break; 504 505 case lir_fxch: 506 fxch(op->in_opr()->as_jint()); 507 break; 508 509 case lir_fld: 510 fld(op->in_opr()->as_jint()); 511 break; 512 513 case lir_ffree: 514 ffree(op->in_opr()->as_jint()); 515 break; 516 517 case lir_branch: 518 break; 519 520 case lir_push: 521 push(op->in_opr()); 522 break; 523 524 case lir_pop: 525 pop(op->in_opr()); 526 break; 527 528 case lir_neg: 529 negate(op->in_opr(), op->result_opr()); 530 break; 531 532 case lir_leal: 533 leal(op->in_opr(), op->result_opr()); 534 break; 535 536 case lir_null_check: 537 if (GenerateCompilerNullChecks) { 538 add_debug_info_for_null_check_here(op->info()); 539 540 if (op->in_opr()->is_single_cpu()) { 541 _masm->null_check(op->in_opr()->as_register()); 542 } else { 543 Unimplemented(); 544 } 545 } 546 break; 547 548 case lir_monaddr: 549 monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr()); 550 break; 551 552 #ifdef SPARC 553 case lir_pack64: 554 pack64(op->in_opr(), op->result_opr()); 555 break; 556 557 case lir_unpack64: 558 unpack64(op->in_opr(), op->result_opr()); 559 break; 560 #endif 561 562 case lir_unwind: 563 unwind_op(op->in_opr()); 564 break; 565 566 default: 567 Unimplemented(); 568 break; 569 } 570 } 571 572 573 void LIR_Assembler::emit_op0(LIR_Op0* op) { 574 switch (op->code()) { 575 case lir_word_align: { 576 while (code_offset() % BytesPerWord != 0) { 577 _masm->nop(); 578 } 579 break; 580 } 581 582 case lir_nop: 583 assert(op->info() == NULL, "not supported"); 584 _masm->nop(); 585 break; 586 587 case lir_label: 588 Unimplemented(); 589 break; 590 591 case lir_build_frame: 592 build_frame(); 593 break; 594 595 case lir_std_entry: 596 // init offsets 597 offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset()); 598 _masm->align(CodeEntryAlignment); 599 if (needs_icache(compilation()->method())) { 600 check_icache(); 601 } 602 offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset()); 603 _masm->verified_entry(); 604 build_frame(); 605 offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset()); 606 break; 607 608 case lir_osr_entry: 609 offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset()); 610 osr_entry(); 611 break; 612 613 case lir_24bit_FPU: 614 set_24bit_FPU(); 615 break; 616 617 case lir_reset_FPU: 618 reset_FPU(); 619 break; 620 621 case lir_breakpoint: 622 breakpoint(); 623 break; 624 625 case lir_fpop_raw: 626 fpop(); 627 break; 628 629 case lir_membar: 630 membar(); 631 break; 632 633 case lir_membar_acquire: 634 membar_acquire(); 635 break; 636 637 case lir_membar_release: 638 membar_release(); 639 break; 640 641 case lir_get_thread: 642 get_thread(op->result_opr()); 643 break; 644 645 default: 646 ShouldNotReachHere(); 647 break; 648 } 649 } 650 651 652 void LIR_Assembler::emit_op2(LIR_Op2* op) { 653 switch (op->code()) { 654 case lir_cmp: 655 if (op->info() != NULL) { 656 assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(), 657 "shouldn't be codeemitinfo for non-address operands"); 658 add_debug_info_for_null_check_here(op->info()); // exception possible 659 } 660 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op); 661 break; 662 663 case lir_cmp_l2i: 664 case lir_cmp_fd2i: 665 case lir_ucmp_fd2i: 666 comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op); 667 break; 668 669 case lir_cmove: 670 cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr()); 671 break; 672 673 case lir_shl: 674 case lir_shr: 675 case lir_ushr: 676 if (op->in_opr2()->is_constant()) { 677 shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr()); 678 } else { 679 shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp_opr()); 680 } 681 break; 682 683 case lir_add: 684 case lir_sub: 685 case lir_mul: 686 case lir_mul_strictfp: 687 case lir_div: 688 case lir_div_strictfp: 689 case lir_rem: 690 assert(op->fpu_pop_count() < 2, ""); 691 arith_op( 692 op->code(), 693 op->in_opr1(), 694 op->in_opr2(), 695 op->result_opr(), 696 op->info(), 697 op->fpu_pop_count() == 1); 698 break; 699 700 case lir_abs: 701 case lir_sqrt: 702 case lir_sin: 703 case lir_tan: 704 case lir_cos: 705 case lir_log: 706 case lir_log10: 707 intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op); 708 break; 709 710 case lir_logic_and: 711 case lir_logic_or: 712 case lir_logic_xor: 713 logic_op( 714 op->code(), 715 op->in_opr1(), 716 op->in_opr2(), 717 op->result_opr()); 718 break; 719 720 case lir_throw: 721 throw_op(op->in_opr1(), op->in_opr2(), op->info()); 722 break; 723 724 default: 725 Unimplemented(); 726 break; 727 } 728 } 729 730 731 void LIR_Assembler::build_frame() { 732 _masm->build_frame(initial_frame_size_in_bytes()); 733 } 734 735 736 void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) { 737 assert((src->is_single_fpu() && dest->is_single_stack()) || 738 (src->is_double_fpu() && dest->is_double_stack()), 739 "round_fp: rounds register -> stack location"); 740 741 reg2stack (src, dest, src->type(), pop_fpu_stack); 742 } 743 744 745 void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) { 746 if (src->is_register()) { 747 if (dest->is_register()) { 748 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here"); 749 reg2reg(src, dest); 750 } else if (dest->is_stack()) { 751 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here"); 752 reg2stack(src, dest, type, pop_fpu_stack); 753 } else if (dest->is_address()) { 754 reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, unaligned, wide); 755 } else { 756 ShouldNotReachHere(); 757 } 758 759 } else if (src->is_stack()) { 760 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here"); 761 if (dest->is_register()) { 762 stack2reg(src, dest, type); 763 } else if (dest->is_stack()) { 764 stack2stack(src, dest, type); 765 } else { 766 ShouldNotReachHere(); 767 } 768 769 } else if (src->is_constant()) { 770 if (dest->is_register()) { 771 const2reg(src, dest, patch_code, info); // patching is possible 772 } else if (dest->is_stack()) { 773 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here"); 774 const2stack(src, dest); 775 } else if (dest->is_address()) { 776 assert(patch_code == lir_patch_none, "no patching allowed here"); 777 const2mem(src, dest, type, info, wide); 778 } else { 779 ShouldNotReachHere(); 780 } 781 782 } else if (src->is_address()) { 783 mem2reg(src, dest, type, patch_code, info, unaligned, wide); 784 785 } else { 786 ShouldNotReachHere(); 787 } 788 } 789 790 791 void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) { 792 #ifndef PRODUCT 793 if (VerifyOopMaps || VerifyOops) { 794 bool v = VerifyOops; 795 VerifyOops = true; 796 OopMapStream s(info->oop_map()); 797 while (!s.is_done()) { 798 OopMapValue v = s.current(); 799 if (v.is_oop()) { 800 VMReg r = v.reg(); 801 if (!r->is_stack()) { 802 stringStream st; 803 st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset()); 804 #ifdef SPARC 805 _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__); 806 #else 807 _masm->verify_oop(r->as_Register()); 808 #endif 809 } else { 810 _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size); 811 } 812 } 813 s.next(); 814 } 815 VerifyOops = v; 816 } 817 #endif 818 }