src/hotspot/share/c1/c1_LIRGenerator.cpp

   1 /*
   2  * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Compilation.hpp"
  27 #include "c1/c1_Defs.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "ci/ciObjArray.hpp"
  36 #include "ci/ciUtilities.hpp"
  37 #include "gc/shared/barrierSet.hpp"
  38 #include "gc/shared/c1/barrierSetC1.hpp"
  39 #include "runtime/arguments.hpp"
  40 #include "runtime/sharedRuntime.hpp"
  41 #include "runtime/stubRoutines.hpp"
  42 #include "runtime/vm_version.hpp"
  43 #include "utilities/bitMap.inline.hpp"
  44 #include "utilities/macros.hpp"
  45 
  46 #ifdef ASSERT
  47 #define __ gen()->lir(__FILE__, __LINE__)->
  48 #else
  49 #define __ gen()->lir()->
  50 #endif
  51 
  52 #ifndef PATCHED_ADDR
  53 #define PATCHED_ADDR  (max_jint)
  54 #endif
  55 
  56 void PhiResolverState::reset(int max_vregs) {


 624     assert(right_op != result_op, "malformed");
 625     __ move(left_op, result_op);
 626     left_op = result_op;
 627   }
 628 
 629   switch(code) {
 630     case Bytecodes::_iand:
 631     case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;
 632 
 633     case Bytecodes::_ior:
 634     case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;
 635 
 636     case Bytecodes::_ixor:
 637     case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;
 638 
 639     default: ShouldNotReachHere();
 640   }
 641 }
 642 
 643 
 644 void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
 645   if (!GenerateSynchronizationCode) return;
 646   // for slow path, use debug info for state after successful locking
 647   CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
 648   __ load_stack_address_monitor(monitor_no, lock);
 649   // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
 650   __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
 651 }
 652 
 653 
 654 void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
 655   if (!GenerateSynchronizationCode) return;
 656   // setup registers
 657   LIR_Opr hdr = lock;
 658   lock = new_hdr;
 659   CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
 660   __ load_stack_address_monitor(monitor_no, lock);
 661   __ unlock_object(hdr, object, lock, scratch, slow_path);
 662 }
 663 
 664 #ifndef PRODUCT
 665 void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
 666   if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
 667     tty->print_cr("   ###class not loaded at new bci %d", new_instance->printable_bci());
 668   } else if (PrintNotLoaded && (TieredCompilation && new_instance->is_unresolved())) {
 669     tty->print_cr("   ###class not resolved at new bci %d", new_instance->printable_bci());
 670   }


 772       if (src_type != NULL) {
 773         if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
 774           is_exact = true;
 775           expected_type = dst_type;
 776         }
 777       }
 778     }
 779     // at least pass along a good guess
 780     if (expected_type == NULL) expected_type = dst_exact_type;
 781     if (expected_type == NULL) expected_type = src_declared_type;
 782     if (expected_type == NULL) expected_type = dst_declared_type;
 783 
 784     src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
 785     dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
 786   }
 787 
 788   // if a probable array type has been identified, figure out if any
 789   // of the required checks for a fast case can be elided.
 790   int flags = LIR_OpArrayCopy::all_flags;
 791 
 792   if (!src_objarray)
 793     flags &= ~LIR_OpArrayCopy::src_objarray;
 794   if (!dst_objarray)
 795     flags &= ~LIR_OpArrayCopy::dst_objarray;
 796 
 797   if (!x->arg_needs_null_check(0))
 798     flags &= ~LIR_OpArrayCopy::src_null_check;
 799   if (!x->arg_needs_null_check(2))
 800     flags &= ~LIR_OpArrayCopy::dst_null_check;
 801 
 802 
 803   if (expected_type != NULL) {
 804     Value length_limit = NULL;
 805 
 806     IfOp* ifop = length->as_IfOp();
 807     if (ifop != NULL) {
 808       // look for expressions like min(v, a.length) which ends up as
 809       //   x > y ? y : x  or  x >= y ? y : x
 810       if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
 811           ifop->x() == ifop->fval() &&

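Note on the pattern matched above: Math.min(v, a.length) reaches C1 as an
IfOp of the shape "x > y ? y : x" (or ">="), so when fval() aliases x() the
other operand is a candidate length limit. A sketch of the shape (assumed
field correspondence, for orientation only):

    // int m = Math.min(v, a.length);          // Java source
    // IfOp: cond = gtr (or geq), x = v, y = a.length,
    //       tval = a.length, fval = v
    // hence the test: cond in {gtr, geq} && ifop->x() == ifop->fval()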

1513       value.load_byte_item();
1514     } else  {
1515       value.load_item();
1516     }
1517   } else {
1518     value.load_for_store(field_type);
1519   }
1520 
1521   set_no_result(x);
1522 
1523 #ifndef PRODUCT
1524   if (PrintNotLoaded && needs_patching) {
1525     tty->print_cr("   ###class not loaded at store_%s bci %d",
1526                   x->is_static() ?  "static" : "field", x->printable_bci());
1527   }
1528 #endif
1529 
1530   if (x->needs_null_check() &&
1531       (needs_patching ||
1532        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1533     // Emit an explicit null check because the offset is too large.
1534     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1535     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1536     __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1537   }
1538 
1539   DecoratorSet decorators = IN_HEAP;
1540   if (is_volatile) {
1541     decorators |= MO_SEQ_CST;
1542   }
1543   if (needs_patching) {
1544     decorators |= C1_NEEDS_PATCHING;
1545   }
1546 
1547   access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
1548                   value.result(), info != NULL ? new CodeEmitInfo(info) : NULL, info);
1549 }
1550 
1551 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1552   assert(x->is_pinned(),"");
1553   bool needs_range_check = x->compute_needs_range_check();
1554   bool use_length = x->length() != NULL;
1555   bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
1556   bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
1557                                          !get_jobject_constant(x->value())->is_null_object() ||
1558                                          x->should_profile());
1559 
1560   LIRItem array(x->array(), this);
1561   LIRItem index(x->index(), this);
1562   LIRItem value(x->value(), this);
1563   LIRItem length(this);
1564 
1565   array.load_item();
1566   index.load_nonconstant();
1567 
1568   if (use_length && needs_range_check) {
1569     length.set_instruction(x->length());
1570     length.load_item();
1571 
1572   }
1573   if (needs_store_check || x->check_boolean()) {
1574     value.load_item();
1575   } else {
1576     value.load_for_store(x->elt_type());
1577   }
1578 
1579   set_no_result(x);
1580 
1581   // the CodeEmitInfo must be duplicated for each different
1582   // LIR-instruction because spilling can occur anywhere between two
1583   // instructions and so the debug information must be different
1584   CodeEmitInfo* range_check_info = state_for(x);
1585   CodeEmitInfo* null_check_info = NULL;
1586   if (x->needs_null_check()) {
1587     null_check_info = new CodeEmitInfo(range_check_info);
1588   }
1589 
1590   if (GenerateRangeChecks && needs_range_check) {
1591     if (use_length) {
1592       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1593       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
1594     } else {
1595       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1596       // range_check also does the null check
1597       null_check_info = NULL;
1598     }
1599   }
1600 
1601   if (GenerateArrayStoreCheck && needs_store_check) {
1602     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1603     array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1604   }
1605 
1606   DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1607   if (x->check_boolean()) {
1608     decorators |= C1_MASK_BOOLEAN;
1609   }
1610 
1611   access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
1612                   NULL, null_check_info);
1613 }
1614 
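Note on the range check emitted in do_StoreIndexed above:
cmp(lir_cond_belowEqual, length, index) is an unsigned comparison, so a
single branch covers both index < 0 and index >= length. A self-contained
sketch of the trick (illustrative, not HotSpot code):

    #include <cstdint>

    // true when index is outside [0, length): the unsigned casts map
    // negative indices to huge values, mirroring the lir_cond_belowEqual
    // branch above (taken when length <=u index).
    inline bool out_of_bounds(int32_t index, int32_t length) {
      return static_cast<uint32_t>(length) <= static_cast<uint32_t>(index);
    }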
1615 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
1616                                   LIRItem& base, LIR_Opr offset, LIR_Opr result,
1617                                   CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
1618   decorators |= ACCESS_READ;
1619   LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
1620   if (access.is_raw()) {
1621     _barrier_set->BarrierSetC1::load_at(access, result);
1622   } else {
1623     _barrier_set->load_at(access, result);
1624   }
1625 }
1626 
1627 void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
1628                                LIR_Opr addr, LIR_Opr result) {
1629   decorators |= ACCESS_READ;
1630   LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
1631   access.set_resolved_addr(addr);
1632   if (access.is_raw()) {


1716       info = state_for(nc);
1717     }
1718   }
1719 
1720   LIRItem object(x->obj(), this);
1721 
1722   object.load_item();
1723 
1724 #ifndef PRODUCT
1725   if (PrintNotLoaded && needs_patching) {
1726     tty->print_cr("   ###class not loaded at load_%s bci %d",
1727                   x->is_static() ?  "static" : "field", x->printable_bci());
1728   }
1729 #endif
1730 
1731   bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1732   if (x->needs_null_check() &&
1733       (needs_patching ||
1734        MacroAssembler::needs_explicit_null_check(x->offset()) ||
1735        stress_deopt)) {
1736     LIR_Opr obj = object.result();
1737     if (stress_deopt) {
1738       obj = new_register(T_OBJECT);
1739       __ move(LIR_OprFact::oopConst(NULL), obj);
1740     }
1741     // Emit an explicit null check because the offset is too large.
1742     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1743     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1744     __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1745   }
1746 
1747   DecoratorSet decorators = IN_HEAP;
1748   if (is_volatile) {
1749     decorators |= MO_SEQ_CST;
1750   }
1751   if (needs_patching) {
1752     decorators |= C1_NEEDS_PATCHING;
1753   }
1754 
1755   LIR_Opr result = rlock_result(x, field_type);
1756   access_load_at(decorators, field_type,
1757                  object, LIR_OprFact::intConst(x->offset()), result,
1758                  info ? new CodeEmitInfo(info) : NULL, info);
1759 }
1760 
1761 
1762 //------------------------java.nio.Buffer.checkIndex------------------------
1763 
1764 // int java.nio.Buffer.checkIndex(int)
1765 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1766   // NOTE: by the time we are in checkIndex() we are guaranteed that
1767   // the buffer is non-null (because checkIndex is package-private and
1768   // only called from within other methods in the buffer).
1769   assert(x->number_of_arguments() == 2, "wrong type");
1770   LIRItem buf  (x->argument_at(0), this);
1771   LIRItem index(x->argument_at(1), this);
1772   buf.load_item();
1773   index.load_item();
1774 
1775   LIR_Opr result = rlock_result(x);
1776   if (GenerateRangeChecks) {
1777     CodeEmitInfo* info = state_for(x);
1778     CodeStub* stub = new RangeCheckStub(info, index.result());


1853       __ move(LIR_OprFact::oopConst(NULL), obj);
1854       __ null_check(obj, new CodeEmitInfo(null_check_info));
1855     }
1856   }
1857 
1858   if (GenerateRangeChecks && needs_range_check) {
1859     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1860       __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result(), array.result()));
1861     } else if (use_length) {
1862       // TODO: use a (modified) version of array_range_check that does not require a
1863       //       constant length to be loaded to a register
1864       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1865       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
1866     } else {
1867       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1868       // The range check performs the null check, so clear it out for the load
1869       null_check_info = NULL;
1870     }
1871   }
1872 
1873   DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1874 
1875   LIR_Opr result = rlock_result(x, x->elt_type());
1876   access_load_at(decorators, x->elt_type(),
1877                  array, index.result(), result,
1878                  NULL, null_check_info);
1879 }
1880 
1881 
1882 void LIRGenerator::do_NullCheck(NullCheck* x) {
1883   if (x->can_trap()) {
1884     LIRItem value(x->obj(), this);
1885     value.load_item();
1886     CodeEmitInfo* info = state_for(x);
1887     __ null_check(value.result(), info);
1888   }
1889 }
1890 
1891 
1892 void LIRGenerator::do_TypeCast(TypeCast* x) {
1893   LIRItem value(x->obj(), this);
1894   value.load_item();
1895   // the result is the same as from the node we are casting
1896   set_result(x, value.result());
1897 }
1898 


2718 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2719   // construct our frame and model the production of the incoming pointer
2720   // to the OSR buffer.
2721   __ osr_entry(LIR_Assembler::osrBufferPointer());
2722   LIR_Opr result = rlock_result(x);
2723   __ move(LIR_Assembler::osrBufferPointer(), result);
2724 }
2725 
2726 
2727 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2728   assert(args->length() == arg_list->length(),
2729          "args=%d, arg_list=%d", args->length(), arg_list->length());
2730   for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
2731     LIRItem* param = args->at(i);
2732     LIR_Opr loc = arg_list->at(i);
2733     if (loc->is_register()) {
2734       param->load_item_force(loc);
2735     } else {
2736       LIR_Address* addr = loc->as_address_ptr();
2737       param->load_for_store(addr->type());
2738       if (addr->type() == T_OBJECT) {
2739         __ move_wide(param->result(), addr);
2740       } else
2741         if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2742           __ unaligned_move(param->result(), addr);
2743         } else {
2744           __ move(param->result(), addr);
2745         }
2746     }
2747   }
2748 
2749   if (x->has_receiver()) {
2750     LIRItem* receiver = args->at(0);
2751     LIR_Opr loc = arg_list->at(0);
2752     if (loc->is_register()) {
2753       receiver->load_item_force(loc);
2754     } else {
2755       assert(loc->is_address(), "just checking");
2756       receiver->load_for_store(T_OBJECT);
2757       __ move_wide(receiver->result(), loc->as_address_ptr());


   1 /*
   2  * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Compilation.hpp"
  27 #include "c1/c1_Defs.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "ci/ciObjArray.hpp"
  36 #include "ci/ciUtilities.hpp"
  37 #include "ci/ciValueArrayKlass.hpp"
  38 #include "ci/ciValueKlass.hpp"
  39 #include "gc/shared/barrierSet.hpp"
  40 #include "gc/shared/c1/barrierSetC1.hpp"
  41 #include "runtime/arguments.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "runtime/stubRoutines.hpp"
  44 #include "runtime/vm_version.hpp"
  45 #include "utilities/bitMap.inline.hpp"
  46 #include "utilities/macros.hpp"
  47 
  48 #ifdef ASSERT
  49 #define __ gen()->lir(__FILE__, __LINE__)->
  50 #else
  51 #define __ gen()->lir()->
  52 #endif
  53 
  54 #ifndef PATCHED_ADDR
  55 #define PATCHED_ADDR  (max_jint)
  56 #endif
  57 
  58 void PhiResolverState::reset(int max_vregs) {


 626     assert(right_op != result_op, "malformed");
 627     __ move(left_op, result_op);
 628     left_op = result_op;
 629   }
 630 
 631   switch(code) {
 632     case Bytecodes::_iand:
 633     case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;
 634 
 635     case Bytecodes::_ior:
 636     case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;
 637 
 638     case Bytecodes::_ixor:
 639     case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;
 640 
 641     default: ShouldNotReachHere();
 642   }
 643 }
 644 
 645 
 646 void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no,
 647                                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info, CodeStub* throw_imse_stub) {
 648   if (!GenerateSynchronizationCode) return;
 649   // for slow path, use debug info for state after successful locking
 650   CodeStub* slow_path = new MonitorEnterStub(object, lock, info, throw_imse_stub, scratch);
 651   __ load_stack_address_monitor(monitor_no, lock);
 652   // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
 653   __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception, throw_imse_stub);
 654 }
 655 
 656 
 657 void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
 658   if (!GenerateSynchronizationCode) return;
 659   // setup registers
 660   LIR_Opr hdr = lock;
 661   lock = new_hdr;
 662   CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
 663   __ load_stack_address_monitor(monitor_no, lock);
 664   __ unlock_object(hdr, object, lock, scratch, slow_path);
 665 }
 666 
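Note: in outline, monitor_enter() above emits the following shape (sketch,
simplified):

    // lock <- address of the BasicObjectLock slot 'monitor_no' in the frame
    // fast path: try to install a displaced header via hdr/scratch (lock_object)
    // on failure/contention: branch to MonitorEnterStub, a runtime call whose
    //   debug info ('info') describes the state *after* successful locking;
    //   a NULL object raises NPE using 'info_for_exception' instead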
 667 #ifndef PRODUCT
 668 void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
 669   if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
 670     tty->print_cr("   ###class not loaded at new bci %d", new_instance->printable_bci());
 671   } else if (PrintNotLoaded && (TieredCompilation && new_instance->is_unresolved())) {
 672     tty->print_cr("   ###class not resolved at new bci %d", new_instance->printable_bci());
 673   }


 775       if (src_type != NULL) {
 776         if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
 777           is_exact = true;
 778           expected_type = dst_type;
 779         }
 780       }
 781     }
 782     // at least pass along a good guess
 783     if (expected_type == NULL) expected_type = dst_exact_type;
 784     if (expected_type == NULL) expected_type = src_declared_type;
 785     if (expected_type == NULL) expected_type = dst_declared_type;
 786 
 787     src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
 788     dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
 789   }
 790 
 791   // if a probable array type has been identified, figure out if any
 792   // of the required checks for a fast case can be elided.
 793   int flags = LIR_OpArrayCopy::all_flags;
 794 
 795   if (!src->is_loaded_flattened_array() && !dst->is_loaded_flattened_array()) {
 796     flags &= ~LIR_OpArrayCopy::always_slow_path;
 797   }
 798   if (!src->maybe_flattened_array()) {
 799     flags &= ~LIR_OpArrayCopy::src_flat_check;
 800   }
 801   if (!dst->maybe_flattened_array()) {
 802     flags &= ~LIR_OpArrayCopy::dst_flat_check;
 803   }
 804 
 805   if (!src_objarray)
 806     flags &= ~LIR_OpArrayCopy::src_objarray;
 807   if (!dst_objarray)
 808     flags &= ~LIR_OpArrayCopy::dst_objarray;
 809 
 810   if (!x->arg_needs_null_check(0))
 811     flags &= ~LIR_OpArrayCopy::src_null_check;
 812   if (!x->arg_needs_null_check(2))
 813     flags &= ~LIR_OpArrayCopy::dst_null_check;
 814 
 815 
 816   if (expected_type != NULL) {
 817     Value length_limit = NULL;
 818 
 819     IfOp* ifop = length->as_IfOp();
 820     if (ifop != NULL) {
 821       // look for expressions like min(v, a.length) which end up as
 822       //   x > y ? y : x  or  x >= y ? y : x
 823       if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
 824           ifop->x() == ifop->fval() &&

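Note on the flag computation in the hunk above: it starts from
LIR_OpArrayCopy::all_flags and clears every check the compiler can prove
unnecessary; the surviving bits tell the backend which guards to emit. A
minimal sketch of the idiom (bit assignments are illustrative, not
LIR_OpArrayCopy's):

    #include <cstdint>

    enum : uint32_t {
      src_null_check = 1u << 0,   // illustrative values
      dst_null_check = 1u << 1,
      src_flat_check = 1u << 2,
      all_flags      = (1u << 3) - 1
    };

    uint32_t arraycopy_flags(bool src_may_be_null, bool src_maybe_flat) {
      uint32_t flags = all_flags;                      // assume every guard is needed
      if (!src_may_be_null) flags &= ~src_null_check;  // each proof removes one guard
      if (!src_maybe_flat)  flags &= ~src_flat_check;
      return flags;                                    // backend emits only the survivors
    }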

1526       value.load_byte_item();
1527     } else  {
1528       value.load_item();
1529     }
1530   } else {
1531     value.load_for_store(field_type);
1532   }
1533 
1534   set_no_result(x);
1535 
1536 #ifndef PRODUCT
1537   if (PrintNotLoaded && needs_patching) {
1538     tty->print_cr("   ###class not loaded at store_%s bci %d",
1539                   x->is_static() ?  "static" : "field", x->printable_bci());
1540   }
1541 #endif
1542 
1543   if (x->needs_null_check() &&
1544       (needs_patching ||
1545        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1546     if (needs_patching && x->field()->signature()->starts_with("Q", 1)) {
1547       // We are storing a field of type "QT;", but T is not yet loaded, so we don't
1548       // know whether this field is flattened or not. Let's deoptimize and recompile.
1549       CodeStub* stub = new DeoptimizeStub(new CodeEmitInfo(info),
1550                                           Deoptimization::Reason_unloaded,
1551                                           Deoptimization::Action_make_not_entrant);
1552       __ branch(lir_cond_always, T_ILLEGAL, stub);
1553     } else {
1554       // Emit an explicit null check because the offset is too large.
1555       // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1556       // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1557       __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1558     }
1559   }
1560 
1561   DecoratorSet decorators = IN_HEAP;
1562   if (is_volatile) {
1563     decorators |= MO_SEQ_CST;
1564   }
1565   if (needs_patching) {
1566     decorators |= C1_NEEDS_PATCHING;
1567   }
1568 
1569   access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
1570                   value.result(), info != NULL ? new CodeEmitInfo(info) : NULL, info);
1571 }
1572 
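Note on "the offset is too large" above: implicit null checks rely on the
hardware fault from dereferencing addresses near 0, so once obj + offset can
fall outside the protected page at address zero, a NULL base would no longer
trap. A rough sketch of the criterion, assuming a 4 KiB page (the real
MacroAssembler::needs_explicit_null_check consults the actual page size):

    #include <cstdint>

    constexpr intptr_t kAssumedPageSize = 4096;  // assumption; HotSpot uses os::vm_page_size()

    // An access at [NULL + offset] only faults reliably while the offset
    // stays inside the unmapped page at address 0.
    inline bool needs_explicit_null_check_sketch(intptr_t offset) {
      return offset < 0 || offset >= kAssumedPageSize;
    }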
1573 // FIXME -- I can't find any other way to pass an address to access_load_at().
1574 class TempResolvedAddress: public Instruction {
1575  public:
1576   TempResolvedAddress(ValueType* type, LIR_Opr addr) : Instruction(type) {
1577     set_operand(addr);
1578   }
1579   virtual void input_values_do(ValueVisitor*) {}
1580   virtual void visit(InstructionVisitor* v)   {}
1581   virtual const char* name() const  { return "TempResolvedAddress"; }
1582 };
1583 
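Note: the class above is a shim. access_load_at()/access_store_at() take a
LIRItem base, so an already-resolved LIR address gets wrapped in a dummy
Instruction that a LIRItem can carry. Hypothetical usage, mirroring
access_flattened_array() below:

    // elm_op already holds the computed element address (a LIR_Opr):
    // TempResolvedAddress* addr = new TempResolvedAddress(as_ValueType(T_INT), elm_op);
    // LIRItem elm_item(addr, this);  // usable as the base of an access_*_at call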
1584 void LIRGenerator::access_flattened_array(bool is_load, LIRItem& array, LIRItem& index, LIRItem& obj_item) {
1585   // Find the starting address of the source (inside the array)
1586   ciType* array_type = array.value()->declared_type();
1587   ciValueArrayKlass* value_array_klass = array_type->as_value_array_klass();
1588   assert(value_array_klass->is_loaded(), "must be");
1589 
1590   ciValueKlass* elem_klass = value_array_klass->element_klass()->as_value_klass();
1591   int array_header_size = value_array_klass->array_header_in_bytes();
1592   int shift = value_array_klass->log2_element_size();
1593 
1594 #ifndef _LP64
1595   LIR_Opr index_op = new_register(T_INT);
1596   // FIXME -- on 32-bit, the shift below can overflow, so we need to check that
1597   // the top (shift+1) bits of index_op must be zero, or
1598   // else throw ArrayIndexOutOfBoundsException
1599   if (index.result()->is_constant()) {
1600     jint const_index = index.result()->as_jint();
1601     __ move(LIR_OprFact::intConst(const_index << shift), index_op);
1602   } else {
1603     __ shift_left(index.result(), shift, index_op);
1604   }
1605 #else
1606   LIR_Opr index_op = new_register(T_LONG);
1607   if (index.result()->is_constant()) {
1608     jint const_index = index.result()->as_jint();
1609     __ move(LIR_OprFact::longConst(const_index << shift), index_op);
1610   } else {
1611     __ convert(Bytecodes::_i2l, index.result(), index_op);
1612     // Need to shift manually, as LIR_Address can scale only up to 3.
1613     __ shift_left(index_op, shift, index_op);
1614   }
1615 #endif
1616 
1617   LIR_Opr elm_op = new_pointer_register();
1618   LIR_Address* elm_address = new LIR_Address(array.result(), index_op, array_header_size, T_ADDRESS);
1619   __ leal(LIR_OprFact::address(elm_address), elm_op);
1620 
1621   for (int i = 0; i < elem_klass->nof_nonstatic_fields(); i++) {
1622     ciField* inner_field = elem_klass->nonstatic_field_at(i);
1623     assert(!inner_field->is_flattened(), "flattened fields must have been expanded");
1624     int obj_offset = inner_field->offset();
1625     int elm_offset = obj_offset - elem_klass->first_field_offset(); // object header is not stored in array.
1626 
1627     BasicType field_type = inner_field->type()->basic_type();
1628     switch (field_type) {
1629     case T_BYTE:
1630     case T_BOOLEAN:
1631     case T_SHORT:
1632     case T_CHAR:
1633       field_type = T_INT;
1634       break;
1635     default:
1636       break;
1637     }
1638 
1639     LIR_Opr temp = new_register(field_type);
1640     TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(field_type), elm_op);
1641     LIRItem elm_item(elm_resolved_addr, this);
1642 
1643     DecoratorSet decorators = IN_HEAP;
1644     if (is_load) {
1645       access_load_at(decorators, field_type,
1646                      elm_item, LIR_OprFact::intConst(elm_offset), temp,
1647                      NULL, NULL);
1648       access_store_at(decorators, field_type,
1649                       obj_item, LIR_OprFact::intConst(obj_offset), temp,
1650                       NULL, NULL);
1651     } else {
1652       access_load_at(decorators, field_type,
1653                      obj_item, LIR_OprFact::intConst(obj_offset), temp,
1654                      NULL, NULL);
1655       access_store_at(decorators, field_type,
1656                       elm_item, LIR_OprFact::intConst(elm_offset), temp,
1657                       NULL, NULL);
1658     }
1659   }
1660 }
1661 
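Note: to make the address arithmetic above concrete, a worked example with
assumed layout numbers (not taken from a real klass): with
array_header_in_bytes() == 16, log2_element_size() == 3 (8-byte elements)
and first_field_offset() == 16 (the object header size), the element address
is array_base + 16 + (index << 3), and a field at obj_offset 20 in the heap
object is copied at elm_offset 20 - 16 = 4 within the flattened element.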
1662 void LIRGenerator::check_flattened_array(LIRItem& array, CodeStub* slow_path) {
1663   LIR_Opr array_klass_reg = new_register(T_METADATA);
1664 
1665   __ move(new LIR_Address(array.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), array_klass_reg);
1666   LIR_Opr layout = new_register(T_INT);
1667   __ move(new LIR_Address(array_klass_reg, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
1668   __ shift_right(layout, Klass::_lh_array_tag_shift, layout);
1669   __ cmp(lir_cond_equal, layout, LIR_OprFact::intConst(Klass::_lh_array_tag_vt_value));
1670   __ branch(lir_cond_equal, T_ILLEGAL, slow_path);
1671 }
1672 
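Note: the sequence above loads the klass' layout helper and isolates the
array-tag bits with an arithmetic right shift before comparing against the
value-array tag. A stand-alone sketch of the decoding (both constants are
placeholders, not the real Klass::_lh_* values):

    #include <cstdint>

    constexpr int     kTagShift = 30;  // stand-in for Klass::_lh_array_tag_shift
    constexpr int32_t kFlatTag  = -3;  // stand-in for Klass::_lh_array_tag_vt_value

    // The tag occupies the top bits of the 32-bit layout helper; a signed
    // (arithmetic) right shift brings it down for a direct comparison.
    inline bool is_flattened_array_sketch(int32_t layout_helper) {
      return (layout_helper >> kTagShift) == kFlatTag;
    }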
1673 bool LIRGenerator::needs_flattened_array_store_check(StoreIndexed* x) {
1674   if (ValueArrayFlatten && x->elt_type() == T_OBJECT && x->array()->maybe_flattened_array()) {
1675     ciType* type = x->value()->declared_type();
1676     if (type != NULL && type->is_klass()) {
1677       ciKlass* klass = type->as_klass();
1678       if (klass->is_loaded() &&
1679           !(klass->is_valuetype() && klass->as_value_klass()->flatten_array()) &&
1680           !klass->is_java_lang_Object() &&
1681           !klass->is_interface()) {
1682         // This is known to be a non-flattenable object. If the array is flattened,
1683         // it will be caught by the code generated by array_store_check().
1684         return false;
1685       }
1686     }
1687     // We're not 100% sure, so let's do the flattened_array_store_check.
1688     return true;
1689   }
1690   return false;
1691 }
1692 
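Note: the decision the predicate above encodes, summarized (sketch):

    // elt_type != T_OBJECT, or the array cannot be flattened     -> no check
    // value's static type is a loaded, concrete, non-flattenable
    //   klass (not Object, not an interface)                     -> no check;
    //   a flattened array is then caught by array_store_check()
    // anything still ambiguous                                   -> runtime check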
1693 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1694   assert(x->is_pinned(),"");
1695   assert(x->elt_type() != T_ARRAY, "never used");
1696   bool is_loaded_flattened_array = x->array()->is_loaded_flattened_array();
1697   bool needs_range_check = x->compute_needs_range_check();
1698   bool use_length = x->length() != NULL;
1699   bool obj_store = x->elt_type() == T_OBJECT;
1700   bool needs_store_check = obj_store && !is_loaded_flattened_array &&
1701                                         (x->value()->as_Constant() == NULL ||
1702                                          !get_jobject_constant(x->value())->is_null_object() ||
1703                                          x->should_profile());
1704 
1705   LIRItem array(x->array(), this);
1706   LIRItem index(x->index(), this);
1707   LIRItem value(x->value(), this);
1708   LIRItem length(this);
1709 
1710   array.load_item();
1711   index.load_nonconstant();
1712 
1713   if (use_length && needs_range_check) {
1714     length.set_instruction(x->length());
1715     length.load_item();
1716   }
1717 
1718   if (needs_store_check || x->check_boolean()
1719       || is_loaded_flattened_array || needs_flattened_array_store_check(x)) {
1720     value.load_item();
1721   } else {
1722     value.load_for_store(x->elt_type());
1723   }
1724 
1725   set_no_result(x);
1726 
1727   // the CodeEmitInfo must be duplicated for each different
1728   // LIR-instruction because spilling can occur anywhere between two
1729   // instructions and so the debug information must be different
1730   CodeEmitInfo* range_check_info = state_for(x);
1731   CodeEmitInfo* null_check_info = NULL;
1732   if (x->needs_null_check()) {
1733     null_check_info = new CodeEmitInfo(range_check_info);
1734   }
1735 
1736   if (GenerateRangeChecks && needs_range_check) {
1737     if (use_length) {
1738       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1739       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
1740     } else {
1741       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1742       // range_check also does the null check
1743       null_check_info = NULL;
1744     }
1745   }
1746 
1747   if (GenerateArrayStoreCheck && needs_store_check) {
1748     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1749     array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1750   }
1751 
1752   if (is_loaded_flattened_array) {
1753     if (!x->is_exact_flattened_array_store()) {
1754       CodeEmitInfo* info = new CodeEmitInfo(range_check_info);
1755       ciKlass* element_klass = x->array()->declared_type()->as_value_array_klass()->element_klass();
1756       flattened_array_store_check(value.result(), element_klass, info);
1757     } else if (!x->value()->is_never_null()) {
1758       __ null_check(value.result(), new CodeEmitInfo(range_check_info));
1759     }
1760     access_flattened_array(false, array, index, value);
1761   } else {
1762     StoreFlattenedArrayStub* slow_path = NULL;
1763 
1764     if (needs_flattened_array_store_check(x)) {
1765       // Check if we indeed have a flattened array
1766       index.load_item();
1767       slow_path = new StoreFlattenedArrayStub(array.result(), index.result(), value.result(), state_for(x));
1768       check_flattened_array(array, slow_path);
1769     }
1770 
1771     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1772     if (x->check_boolean()) {
1773       decorators |= C1_MASK_BOOLEAN;
1774     }
1775 
1776     access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
1777                     NULL, null_check_info);
1778     if (slow_path != NULL) {
1779       __ branch_destination(slow_path->continuation());
1780     }
1781   }
1782 }
1783 
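Note: control flow emitted for the maybe-flattened store above, in outline
(sketch):

    // check_flattened_array: load klass, test the layout-helper tag
    //   tag == flat value array --> StoreFlattenedArrayStub (runtime call)
    // fast path:                   access_store_at(...)  // ordinary element store
    // slow_path->continuation():   both paths rejoin here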
1784 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
1785                                   LIRItem& base, LIR_Opr offset, LIR_Opr result,
1786                                   CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
1787   decorators |= ACCESS_READ;
1788   LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
1789   if (access.is_raw()) {
1790     _barrier_set->BarrierSetC1::load_at(access, result);
1791   } else {
1792     _barrier_set->load_at(access, result);
1793   }
1794 }
1795 
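Note: the qualified call _barrier_set->BarrierSetC1::load_at(...) above binds
statically, skipping the virtual dispatch that the plain load_at() call in
the else branch performs. A self-contained illustration of the C++ mechanism
(the G1 names are used purely as an example):

    #include <cstdio>

    struct BarrierSetC1 {
      virtual void load_at() { puts("raw load, no GC barrier"); }
    };
    struct G1BarrierSetC1 : BarrierSetC1 {
      void load_at() override { puts("load with G1 barriers"); }
    };

    int main() {
      G1BarrierSetC1 g1;
      BarrierSetC1* bs = &g1;
      bs->load_at();                // virtual dispatch: "load with G1 barriers"
      bs->BarrierSetC1::load_at();  // qualified, statically bound: "raw load"
      return 0;
    }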
1796 void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
1797                                LIR_Opr addr, LIR_Opr result) {
1798   decorators |= ACCESS_READ;
1799   LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
1800   access.set_resolved_addr(addr);
1801   if (access.is_raw()) {


1885       info = state_for(nc);
1886     }
1887   }
1888 
1889   LIRItem object(x->obj(), this);
1890 
1891   object.load_item();
1892 
1893 #ifndef PRODUCT
1894   if (PrintNotLoaded && needs_patching) {
1895     tty->print_cr("   ###class not loaded at load_%s bci %d",
1896                   x->is_static() ?  "static" : "field", x->printable_bci());
1897   }
1898 #endif
1899 
1900   bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1901   if (x->needs_null_check() &&
1902       (needs_patching ||
1903        MacroAssembler::needs_explicit_null_check(x->offset()) ||
1904        stress_deopt)) {
1905     if (needs_patching && x->field()->signature()->starts_with("Q", 1)) {
1906       // We are loading a field of type "QT;", but class T is not yet loaded. We don't know
1907       // whether this field is flattened or not. Let's deoptimize and recompile.
1908       CodeStub* stub = new DeoptimizeStub(new CodeEmitInfo(info),
1909                                           Deoptimization::Reason_unloaded,
1910                                           Deoptimization::Action_make_not_entrant);
1911       __ branch(lir_cond_always, T_ILLEGAL, stub);
1912     } else {
1913       LIR_Opr obj = object.result();
1914       if (stress_deopt) {
1915         obj = new_register(T_OBJECT);
1916         __ move(LIR_OprFact::oopConst(NULL), obj);
1917       }
1918       // Emit an explicit null check because the offset is too large.
1919       // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1920       // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1921       __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1922     }
1923   } else if (x->value_klass() != NULL && x->default_value() == NULL) {
1924     assert(x->is_static() && !x->value_klass()->is_loaded(), "must be");
1925     assert(needs_patching, "must be");
1926     // The value klass was not loaded so we don't know what its default value should be
1927     CodeStub* stub = new DeoptimizeStub(new CodeEmitInfo(info),
1928                                         Deoptimization::Reason_unloaded,
1929                                         Deoptimization::Action_make_not_entrant);
1930     __ branch(lir_cond_always, T_ILLEGAL, stub);
1931   }
1932 
1933   DecoratorSet decorators = IN_HEAP;
1934   if (is_volatile) {
1935     decorators |= MO_SEQ_CST;
1936   }
1937   if (needs_patching) {
1938     decorators |= C1_NEEDS_PATCHING;
1939   }
1940 
1941   LIR_Opr result = rlock_result(x, field_type);
1942   access_load_at(decorators, field_type,
1943                  object, LIR_OprFact::intConst(x->offset()), result,
1944                  info ? new CodeEmitInfo(info) : NULL, info);
1945 
1946   if (x->value_klass() != NULL && x->default_value() != NULL) {
1947     LabelObj* L_end = new LabelObj();
1948     __ cmp(lir_cond_notEqual, result, LIR_OprFact::oopConst(NULL));
1949     __ branch(lir_cond_notEqual, T_OBJECT, L_end->label());
1950 
1951     LIRItem default_value(x->default_value(), this);
1952     default_value.load_item();
1953     __ move(default_value.result(), result);
1954 
1955     __ branch_destination(L_end->label());
1956   }
1957 }
1958 
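Note: the epilogue above handles flattenable static fields whose klass is
loaded: a NULL read means the field holds no instance yet, so the klass'
default value is substituted. In outline (sketch):

    // result = load field
    // if (result != NULL) goto L_end;
    // result = default_value        // the all-zero default instance
    // L_end: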
1959 
1960 //------------------------java.nio.Buffer.checkIndex------------------------
1961 
1962 // int java.nio.Buffer.checkIndex(int)
1963 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1964   // NOTE: by the time we are in checkIndex() we are guaranteed that
1965   // the buffer is non-null (because checkIndex is package-private and
1966   // only called from within other methods in the buffer).
1967   assert(x->number_of_arguments() == 2, "wrong type");
1968   LIRItem buf  (x->argument_at(0), this);
1969   LIRItem index(x->argument_at(1), this);
1970   buf.load_item();
1971   index.load_item();
1972 
1973   LIR_Opr result = rlock_result(x);
1974   if (GenerateRangeChecks) {
1975     CodeEmitInfo* info = state_for(x);
1976     CodeStub* stub = new RangeCheckStub(info, index.result());


2051       __ move(LIR_OprFact::oopConst(NULL), obj);
2052       __ null_check(obj, new CodeEmitInfo(null_check_info));
2053     }
2054   }
2055 
2056   if (GenerateRangeChecks && needs_range_check) {
2057     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
2058       __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result(), array.result()));
2059     } else if (use_length) {
2060       // TODO: use a (modified) version of array_range_check that does not require a
2061       //       constant length to be loaded to a register
2062       __ cmp(lir_cond_belowEqual, length.result(), index.result());
2063       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
2064     } else {
2065       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
2066       // The range check performs the null check, so clear it out for the load
2067       null_check_info = NULL;
2068     }
2069   }
2070 
2071   if (x->array()->is_loaded_flattened_array()) {
2072     // Find the destination address (of the NewValueTypeInstance)
2073     LIR_Opr obj = x->vt()->operand();
2074     LIRItem obj_item(x->vt(), this);
2075 
2076     access_flattened_array(true, array, index, obj_item);
2077     set_no_result(x);
2078   } else {
2079     LIR_Opr result = rlock_result(x, x->elt_type());
2080     LoadFlattenedArrayStub* slow_path = NULL;
2081 
2082     if (x->elt_type() == T_OBJECT && x->array()->maybe_flattened_array()) {
2083       index.load_item();
2084       // if we are loading from a flattened array, load it using a runtime call
2085       slow_path = new LoadFlattenedArrayStub(array.result(), index.result(), result, state_for(x));
2086       check_flattened_array(array, slow_path);
2087     }
2088 
2089     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
2090     access_load_at(decorators, x->elt_type(),
2091                    array, index.result(), result,
2092                    NULL, null_check_info);
2093 
2094     if (slow_path != NULL) {
2095       __ branch_destination(slow_path->continuation());
2096     }
2097   }
2098 }
2099 
2100 
2101 void LIRGenerator::do_NullCheck(NullCheck* x) {
2102   if (x->can_trap()) {
2103     LIRItem value(x->obj(), this);
2104     value.load_item();
2105     CodeEmitInfo* info = state_for(x);
2106     __ null_check(value.result(), info);
2107   }
2108 }
2109 
2110 
2111 void LIRGenerator::do_TypeCast(TypeCast* x) {
2112   LIRItem value(x->obj(), this);
2113   value.load_item();
2114   // the result is the same as from the node we are casting
2115   set_result(x, value.result());
2116 }
2117 


2937 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2938   // construct our frame and model the production of the incoming pointer
2939   // to the OSR buffer.
2940   __ osr_entry(LIR_Assembler::osrBufferPointer());
2941   LIR_Opr result = rlock_result(x);
2942   __ move(LIR_Assembler::osrBufferPointer(), result);
2943 }
2944 
2945 
2946 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2947   assert(args->length() == arg_list->length(),
2948          "args=%d, arg_list=%d", args->length(), arg_list->length());
2949   for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
2950     LIRItem* param = args->at(i);
2951     LIR_Opr loc = arg_list->at(i);
2952     if (loc->is_register()) {
2953       param->load_item_force(loc);
2954     } else {
2955       LIR_Address* addr = loc->as_address_ptr();
2956       param->load_for_store(addr->type());
2957       assert(addr->type() != T_VALUETYPE, "not supported yet");
2958       if (addr->type() == T_OBJECT) {
2959         __ move_wide(param->result(), addr);
2960       } else
2961         if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2962           __ unaligned_move(param->result(), addr);
2963         } else {
2964           __ move(param->result(), addr);
2965         }
2966     }
2967   }
2968 
2969   if (x->has_receiver()) {
2970     LIRItem* receiver = args->at(0);
2971     LIR_Opr loc = arg_list->at(0);
2972     if (loc->is_register()) {
2973       receiver->load_item_force(loc);
2974     } else {
2975       assert(loc->is_address(), "just checking");
2976       receiver->load_for_store(T_OBJECT);
2977       __ move_wide(receiver->result(), loc->as_address_ptr());

