/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_c1_LIRAssembler.cpp.incl"


void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that the call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_patching_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
#endif
}


//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _bs(Universe::heap()->barrier_set())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
}


void LIR_Assembler::append_patching_stub(PatchingStub* stub) {
  _slow_case_stubs->append(stub);
}


void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(1*K)) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::emit_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = (*stub_list)[m];

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}


void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}


void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print out every op since that's too verbose. Print branches since
      // they include block and stub names. Also print patching moves since they
      // generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  _masm->code_section()->relocate(pc(), relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->lock_stack();
}

void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL) return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL) return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info. Get rid of it.
    if (_pending_non_safepoint->bci() == src->bci() &&
        debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Return NULL if n is too large.
// Returns the caller_bci for the next-younger state, also.
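// A worked example (hypothetical inlining chain, not taken from this code):
// if method A calls B at bci 7, B calls C at bci 3, and s is C's state at
// bci 5, then n=0 returns A's state with bci_result = 7, n=1 returns B's
// state with bci_result = 3, n=2 returns C's state with bci_result left
// unchanged, and n=3 returns NULL.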
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL) break;
    t = t->caller_state();
  }
  if (t == NULL) return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL) return s;
    t = tc;
    bci_result = s->scope()->caller_bci();
    s = s->caller_state();
  }
}

void LIR_Assembler::record_non_safepoint_debug_info() {
  int pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack = debug_info(_pending_non_safepoint);
  int bci = _pending_non_safepoint->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL) break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    debug_info->describe_scope(pc_offset, scope->method(), s_bci, false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}

void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  emit_code_stub(stub);
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  emit_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}


void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align call sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub stuff out of line
  emit_static_call_stub();

  switch (op->code()) {
    case lir_static_call:
      call(op, relocInfo::static_call_type);
      break;
    case lir_optvirtual_call:
    case lir_dynamic_call:
      call(op, relocInfo::opt_virtual_call_type);
      break;
    case lir_icvirtual_call:
      ic_call(op);
      break;
    case lir_virtual_call:
      vtable_call(op);
      break;
    default: ShouldNotReachHere();
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
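  // (The flag is ultimately recorded on the nmethod, where the runtime can
  // consult it to give MethodHandle call sites special treatment, e.g. when
  // walking the stack.)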
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the fpu stack dirty; clean it
  if (UseSSE < 2) {
    int i;
    for (i = 1; i <= 7; i++) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind(*(op->label()));
}


void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(), op->move_kind() == lir_move_unaligned);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
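      // the frame is fully set up at this point; record its completion offset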
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes());
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack(src, dest, src->type(), pop_fpu_stack);
}


void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, unaligned);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOopMaps || VerifyOops) {
    bool v = VerifyOops;
    VerifyOops = true;
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      s.next();
    }
    VerifyOops = v;
  }
#endif
}