          --- old/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
          +++ new/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
[... 428 lines elided ...]
 429  429    // generate code for exception handler
 430  430    address handler_base = __ start_a_stub(exception_handler_size);
 431  431    if (handler_base == NULL) {
 432  432      // not enough space left for the handler
 433  433      bailout("exception handler overflow");
 434  434      return -1;
 435  435    }
 436  436  
 437  437    int offset = code_offset();
 438  438  
 439      -  // if the method does not have an exception handler, then there is
 440      -  // no reason to search for one
 441      -  if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_on_exceptions()) {
 442      -    // the exception oop and pc are in rax, and rdx
 443      -    // no other registers need to be preserved, so invalidate them
 444      -    __ invalidate_registers(false, true, true, false, true, true);
 445      -
 446      -    // check that there is really an exception
 447      -    __ verify_not_null_oop(rax);
 448      -
 449      -    // search an exception handler (rax: exception oop, rdx: throwing pc)
 450      -    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id)));
 451      -
 452      -    // if the call returns here, then the exception handler for particular
 453      -    // exception doesn't exist -> unwind activation and forward exception to caller
 454      -  }
 455      -
 456      -  // the exception oop is in rax,
      439 +  // the exception oop and pc are in rax, and rdx
 457  440    // no other registers need to be preserved, so invalidate them
 458      -  __ invalidate_registers(false, true, true, true, true, true);
      441 +  __ invalidate_registers(false, true, true, false, true, true);
 459  442  
 460  443    // check that there is really an exception
 461  444    __ verify_not_null_oop(rax);
 462  445  
 463      -  // unlock the receiver/klass if necessary
 464      -  // rax,: exception
 465      -  ciMethod* method = compilation()->method();
 466      -  if (method->is_synchronized() && GenerateSynchronizationCode) {
 467      -    monitorexit(FrameMap::rbx_oop_opr, FrameMap::rcx_opr, SYNC_header, 0, rax);
 468      -  }
      446 +  // search an exception handler (rax: exception oop, rdx: throwing pc)
      447 +  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id)));
      448 +
      449 +  __ stop("should not reach here");
 469  450  
 470      -  // unwind activation and forward exception to caller
 471      -  // rax,: exception
 472      -  __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 473  451    assert(code_offset() - offset <= exception_handler_size, "overflow");
 474  452    __ end_a_stub();
 475  453  
 476  454    return offset;
 477  455  }
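Taken together, the rewritten handler has a single path: it always calls into Runtime1::handle_exception_nofpu_id and is not expected to return here, so the old unlock/unwind tail is gone. For reference, a reconstruction of the stub body on the new side (assembler stub bookkeeping around it is unchanged); this is only a re-reading of the right-hand column above, not additional code:

    // Reconstructed from the new (right-hand) side of the hunk above.
    int offset = code_offset();

    // the exception oop and pc are in rax and rdx;
    // no other registers need to be preserved, so invalidate them
    __ invalidate_registers(false, true, true, false, true, true);

    // check that there is really an exception
    __ verify_not_null_oop(rax);

    // search an exception handler (rax: exception oop, rdx: throwing pc);
    // the runtime stub either dispatches to the handler or unwinds the
    // activation itself, so control should not come back to this point
    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id)));

    __ stop("should not reach here");

    assert(code_offset() - offset <= exception_handler_size, "overflow");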
 478  456  
 479  457  
 480  458  int LIR_Assembler::emit_deopt_handler() {
 481  459    // if the last instruction is a call (typically to do a throw which
 482  460    // is coming at the end after block reordering) the return address
[... 5 lines elided ...]
 488  466    // generate code for exception handler
 489  467    address handler_base = __ start_a_stub(deopt_handler_size);
 490  468    if (handler_base == NULL) {
 491  469      // not enough space left for the handler
 492  470      bailout("deopt handler overflow");
 493  471      return -1;
 494  472    }
 495  473  
 496  474    int offset = code_offset();
 497  475    InternalAddress here(__ pc());
      476 +
 498  477    __ pushptr(here.addr());
 499  478    __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
      479 +
 500  480    assert(code_offset() - offset <= deopt_handler_size, "overflow");
 501  481    __ end_a_stub();
 502  482  
 503  483    return offset;
 504  484  }
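The deopt handler stays a two-instruction stub: InternalAddress here(__ pc()) captures the stub's own starting pc, pushptr pushes that literal address, and the jump enters the deopt blob's unpack entry, so the blob sees an address inside this nmethod as the return pc of the frame being deoptimized. A sketch of the emitted sequence (illustrative labels, not HotSpot symbols):

    // handler:                          // address captured by InternalAddress here(__ pc())
    //   push handler                    // fake return address pointing into this nmethod
    //   jmp  SharedRuntime::deopt_blob()->unpack()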
 505  485  
 506  486  
 507  487  // This is the fast version of java.lang.String.compare; it has no
 508  488  // OSR-entry and therefore, we generate a slow version for OSRs
 509  489  void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
[... 76 lines elided ...]
 586  566  }
 587  567  
 588  568  
 589  569  void LIR_Assembler::return_op(LIR_Opr result) {
 590  570    assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
 591  571    if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
 592  572      assert(result->fpu() == 0, "result must already be on TOS");
 593  573    }
 594  574  
 595  575    // Pop the stack before the safepoint code
 596      -  __ leave();
      576 +  __ remove_frame(initial_frame_size_in_bytes());
 597  577  
 598  578    bool result_is_oop = result->is_valid() ? result->is_oop() : false;
 599  579  
 600  580    // Note: we do not need to round double result; float result has the right precision
 601  581    // the poll sets the condition code, but no data registers
 602  582    AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
 603  583                                relocInfo::poll_return_type);
 604  584  
 605  585    // NOTE: this requires that the polling page be reachable else the reloc
 606  586    // goes to the movq that loads the address and not the faulting instruction
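The epilogue no longer uses leave(), which restores rsp from rbp; it tears the frame down by its known size instead, presumably because rbp is repurposed to carry the saved SP around method handle invokes (see preserve_SP()/restore_SP() further down). A minimal sketch of what remove_frame() is assumed to do on x86; the authoritative definition lives in c1_MacroAssembler_x86.cpp and may differ:

    // Assumed shape of C1_MacroAssembler::remove_frame() (sketch only):
    // drop the fixed-size frame without reading rbp the way
    // leave (rsp = rbp; pop rbp) would.
    void C1_MacroAssembler::remove_frame(int frame_size_in_bytes) {
      addptr(rsp, frame_size_in_bytes);   // discard spill slots and locals
      pop(rbp);                           // restore the caller's saved rbp
    }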
[... 2124 lines elided ...]
2731 2711  }
2732 2712  
2733 2713  
2734 2714  void LIR_Assembler::align_call(LIR_Code code) {
2735 2715    if (os::is_MP()) {
2736 2716      // make sure that the displacement word of the call ends up word aligned
2737 2717      int offset = __ offset();
2738 2718      switch (code) {
2739 2719        case lir_static_call:
2740 2720        case lir_optvirtual_call:
     2721 +      case lir_dynamic_call:
2741 2722          offset += NativeCall::displacement_offset;
2742 2723          break;
2743 2724        case lir_icvirtual_call:
2744 2725          offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
2745 2726        break;
2746 2727        case lir_virtual_call:  // currently, sparc-specific for niagara
2747 2728        default: ShouldNotReachHere();
2748 2729      }
2749 2730      while (offset++ % BytesPerWord != 0) {
2750 2731        __ nop();
2751 2732      }
2752 2733    }
2753 2734  }
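The new lir_dynamic_call case gets the same padding as static and opt-virtual calls: nops are emitted until the 32-bit displacement of the call starts on a word boundary, so it can be patched atomically on MP systems. A small standalone illustration of the padding arithmetic, assuming NativeCall::displacement_offset is 1 on x86 (one 0xE8 opcode byte before the disp32):

    // Hypothetical, self-contained illustration of the nop-padding loop above;
    // not HotSpot code.
    #include <cstdio>

    int main() {
      const int BytesPerWord        = 8;   // 64-bit word, as on x86_64
      const int displacement_offset = 1;   // assumption: opcode byte, then disp32
      int code_offset = 30;                // example: current position in the code buffer

      int offset = code_offset + displacement_offset;
      int nops = 0;
      while (offset++ % BytesPerWord != 0) {
        nops++;                            // each nop shifts the call site by one byte
      }
      // With code_offset == 30: one nop, call opcode at 31, displacement at 32.
      printf("nops=%d call_at=%d disp_at=%d\n",
             nops, code_offset + nops, code_offset + nops + displacement_offset);
      return 0;
    }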
2754 2735  
2755 2736  
2756      -void LIR_Assembler::call(address entry, relocInfo::relocType rtype, CodeEmitInfo* info) {
     2737 +void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2757 2738    assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2758 2739           "must be aligned");
2759      -  __ call(AddressLiteral(entry, rtype));
2760      -  add_call_info(code_offset(), info);
     2740 +  __ call(AddressLiteral(op->addr(), rtype));
     2741 +  add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
2761 2742  }
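call() (and ic_call() below) now receive the whole LIR_OpJavaCall instead of a raw entry address and CodeEmitInfo, so the is_method_handle_invoke flag can travel with the op into add_call_info(). The emitters depend only on a small slice of that op; a sketch of the interface they assume (the real class in c1_LIR.hpp carries much more):

    // Interface assumed by the new call()/ic_call() bodies (sketch only).
    class LIR_OpJavaCall : public LIR_OpCall {
     public:
      address       addr() const;                    // call target entry point
      CodeEmitInfo* info() const;                    // debug info recorded via add_call_info()
      bool          is_method_handle_invoke() const; // whether this site is a MethodHandle invoke
    };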
2762 2743  
2763 2744  
2764      -void LIR_Assembler::ic_call(address entry, CodeEmitInfo* info) {
     2745 +void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2765 2746    RelocationHolder rh = virtual_call_Relocation::spec(pc());
2766 2747    __ movoop(IC_Klass, (jobject)Universe::non_oop_word());
2767 2748    assert(!os::is_MP() ||
2768 2749           (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2769 2750           "must be aligned");
2770      -  __ call(AddressLiteral(entry, rh));
2771      -  add_call_info(code_offset(), info);
     2751 +  __ call(AddressLiteral(op->addr(), rh));
     2752 +  add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
2772 2753  }
2773 2754  
2774 2755  
2775 2756  /* Currently, vtable-dispatch is only enabled for sparc platforms */
2776      -void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) {
     2757 +void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
2777 2758    ShouldNotReachHere();
2778 2759  }
2779 2760  
     2761 +
     2762 +void LIR_Assembler::preserve_SP() {
     2763 +  __ movptr(rbp, rsp);
     2764 +}
     2765 +
     2766 +
     2767 +void LIR_Assembler::restore_SP() {
     2768 +  __ movptr(rsp, rbp);
     2769 +}
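preserve_SP() and restore_SP() are new hooks that stash the caller's rsp in rbp before a call and put it back afterwards; presumably they bracket method handle invokes, whose adapters are allowed to adjust the stack pointer. A hypothetical emission sequence (illustrative only; the actual driver sits in the shared, platform-independent LIR_Assembler call path, which is not part of this file):

    // Hypothetical caller of the new hooks; names other than
    // preserve_SP/restore_SP/call are taken from the hunks above.
    if (op->is_method_handle_invoke()) {
      preserve_SP();                       // rbp := rsp, callee may move rsp freely
    }
    call(op, relocInfo::opt_virtual_call_type);
    if (op->is_method_handle_invoke()) {
      restore_SP();                        // rsp := rbp, undo any adapter adjustment
    }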
     2770 +
     2771 +
2780 2772  void LIR_Assembler::emit_static_call_stub() {
2781 2773    address call_pc = __ pc();
2782 2774    address stub = __ start_a_stub(call_stub_size);
2783 2775    if (stub == NULL) {
2784 2776      bailout("static call stub overflow");
2785 2777      return;
2786 2778    }
2787 2779  
2788 2780    int start = __ offset();
2789 2781    if (os::is_MP()) {
↓ open down ↓ 32 lines elided ↑ open up ↑
2822 2814      __ lea(exceptionPC->as_register(), pc_for_athrow);
2823 2815      add_call_info(pc_for_athrow_offset, info); // for exception handler
2824 2816  
2825 2817      __ verify_not_null_oop(rax);
2826 2818      // search an exception handler (rax: exception oop, rdx: throwing pc)
2827 2819      if (compilation()->has_fpu_code()) {
2828 2820        unwind_id = Runtime1::handle_exception_id;
2829 2821      } else {
2830 2822        unwind_id = Runtime1::handle_exception_nofpu_id;
2831 2823      }
     2824 +    __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
2832 2825    } else {
2833      -    unwind_id = Runtime1::unwind_exception_id;
     2826 +    // remove the activation
     2827 +    __ remove_frame(initial_frame_size_in_bytes());
     2828 +    __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
2834 2829    }
2835      -  __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
2836 2830  
2837 2831    // enough room for two byte trap
2838 2832    __ nop();
2839 2833  }
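The tail of the throw path now forks explicitly: when the exception may be handled in this method, the code calls the handle_exception stub (with or without FPU state saving); in the unwind case it first drops the activation with remove_frame() and then jumps to unwind_exception_id, rather than both branches funnelling into a single call as before. Reconstructed new-side control flow (the branch condition is in the context elided above and is left out here):

    // Reconstruction of the right-hand side above; not additional code.
    if (/* handled in this method; condition not shown in this hunk */) {
      // ... set up rax/rdx and debug info as in the context lines above ...
      if (compilation()->has_fpu_code()) {
        unwind_id = Runtime1::handle_exception_id;
      } else {
        unwind_id = Runtime1::handle_exception_nofpu_id;
      }
      __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
    } else {
      // remove the activation and forward the exception to the caller
      __ remove_frame(initial_frame_size_in_bytes());
      __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
    }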
2840 2834  
2841 2835  
2842 2836  void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2843 2837  
2844 2838    // optimized version for linear scan:
2845 2839    // * count must be already in ECX (guaranteed by LinearScan)
[... 620 lines elided ...]