/*
 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/barrierSet.hpp"
#include "runtime/os.hpp"

void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // We must have enough patching space so that a call can be inserted.
  // We cannot use fat nops here, since the concurrent code rewrite may
  // transiently create an illegal instruction sequence.
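  // (A fat nop spanning the patched range could be observed half-rewritten
  // by a concurrently executing thread; single-byte nops keep every
  // intermediate state decodable.)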
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeGeneralJump::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}

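// Choose between load_appendix_id and load_mirror_id: raw bytecodes that can
// carry an optional appendix (JSR 292 call sites) patch in the appendix,
// everything else patches in the class mirror.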
PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
  IRScope* scope = info->scope();
  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
  if (Bytecodes::has_optional_appendix(bc_raw)) {
    return PatchingStub::load_appendix_id;
  }
  return PatchingStub::load_mirror_id;
}

//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _masm(c->masm())
 , _bs(BarrierSet::barrier_set())
 , _compilation(c)
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
  // The unwind handler label may be unbound if this destructor is invoked because of a bailout.
  // Reset it here to avoid an assertion.
  _unwind_handler_entry.reset();
}

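// Bail out when the remaining code buffer space drops below a slack of
// 1K (32-bit) or 2K (64-bit), so emission never runs off the end mid-op.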
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = stub_list->at(m);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}

bool LIR_Assembler::needs_clinit_barrier_on_entry(ciMethod* method) const {
  return VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier();
}

int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob which
// in turn simplifies stack overflow handling.
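// For example (numbers purely illustrative): with a 512-byte compiled frame,
// a 256-byte platform bang pad, and a 2048-byte interpreter frame estimate,
// we bang MAX2(512 + 256, 2048) = 2048 bytes.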
int LIR_Assembler::bang_size_in_bytes() const {
  return MAX2(initial_frame_size_in_bytes() + os::extra_bang_size_in_bytes(), _compilation->interpreter_frame_size());
}

void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phis
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}


void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't record every op since that's too verbose.  Print
      // branches since they include block and stub names.  Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none) ||
          (op->code() == lir_leal && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

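// The debug state of an instruction: StateSplit instructions carry their own
// state; for everything else fall back to the state before the instruction.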
static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}

void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL)  return;
  int pc_offset = code_offset();
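  // Several LIR ops can originate from the same HIR instruction; if the
  // pending non-safepoint entry is already for this instruction, just
  // extend it to the current offset.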
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL)  return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info.  Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Returns NULL if n is too large.
// Also returns, via bci_result, the caller bci of the next-younger state.
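// Illustrative example: for a() calling b() calling c(), with s the youngest
// state (c's), nth_oldest(s, 0, bci) is a's state, nth_oldest(s, 1, bci) is
// b's, nth_oldest(s, 2, bci) is s itself, and nth_oldest(s, 3, bci) is NULL.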
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL)  break;
    t = t->caller_state();
  }
  if (t == NULL)  return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL)  return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}

void LIR_Assembler::record_non_safepoint_debug_info() {
  int         pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack    = debug_info(_pending_non_safepoint);
  int         bci       = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL)  break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt.
    methodHandle null_mh;
    debug_info->describe_scope(pc_offset, null_mh, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  return add_debug_info_for_null_check(code_offset(), cinfo);
}

ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  append_code_stub(stub);
  return stub;
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}


void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  // must align call sites, otherwise they can't be updated atomically
  align_call(op->code());

  // emit the static call stub out of line
  emit_static_call_stub();
  CHECK_BAILOUT();

  switch (op->code()) {
  case lir_static_call:
  case lir_dynamic_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default:
    fatal("unexpected op code: %s", op->name());
    break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(IA32) && defined(TIERED)
  // C2 can leave the FPU stack dirty; clean it up here.
  if (UseSSE < 2) {
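    // With UseSSE < 2, FP values live on the x87 register stack: free st(1)
    // through st(7) unconditionally, and st(0) as well unless it holds this
    // call's floating-point result.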
    for (int i = 1; i <= 7; i++) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // IA32 && TIERED
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind(*(op->label()));
}


void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

#ifdef IA32
    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;
#endif // IA32

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr(), op->patch_code(), op->info());
      break;

    case lir_null_check: {
      ImplicitNullCheckStub* stub = add_debug_info_for_null_check_here(op->info());

      if (op->in_opr()->is_single_cpu()) {
        _masm->null_check(op->in_opr()->as_register(), stub->entry());
      } else {
        Unimplemented();
      }
      break;
    }

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_std_entry:
      // init offsets
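      // (OSR_Entry is seeded here and is overwritten by the lir_osr_entry
      // case below when the method actually has an OSR entry point.)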
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      if (needs_clinit_barrier_on_entry(compilation()->method())) {
        clinit_barrier(compilation()->method());
      }
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

#ifdef IA32
    case lir_fpop_raw:
      fpop();
      break;
#endif // IA32

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    case lir_on_spin_wait:
      on_spin_wait();
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be CodeEmitInfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "fpu_pop_count must be 0 or 1");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_tan:
    case lir_log10:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_neg:
      negate(op->in_opr1(), op->result_opr(), op->in_opr2());
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert(strict_fp_requires_explicit_rounding, "not required");
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack(src, dest, src->type(), pop_fpu_stack);
}

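// Dispatch a move on the (src, dest) operand kinds:
//   register -> register | stack | address
//   stack    -> register | stack
//   constant -> register | stack | address
//   address  -> register
// Patching and CodeEmitInfo are only legal on the combinations asserted below.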
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
          _masm->verify_oop(r->as_Register());
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}