src/share/vm/c1/c1_LIRGenerator.cpp

Print this page
rev 4136 : 7153771: array bound check elimination for c1
Summary: when possible, optimize out array bound checks, inserting predicates when needed.
Reviewed-by:


 386 }
 387 
 388 
 389 // This is called for each node in tree; the walk stops if a root is reached
 390 void LIRGenerator::walk(Value instr) {
 391   InstructionMark im(compilation(), instr);
 392   //stop walk when encounter a root
       // NOTE: && binds tighter than ||, so this condition reads as
       // (pinned AND not a Phi) OR (operand already assigned, i.e. visited).
 393   if (instr->is_pinned() && instr->as_Phi() == NULL || instr->operand()->is_valid()) {
 394     assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
 395   } else {
       // not a root and not yet visited: dispatch into this generator to
       // emit LIR for the value now
 396     assert(instr->subst() == instr, "shouldn't have missed substitution");
 397     instr->visit(this);
 398     // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
 399   }
 400 }
 401 
 402 
 403 CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
 404   assert(state != NULL, "state must be defined");
 405 




 406   ValueStack* s = state;
 407   for_each_state(s) {
 408     if (s->kind() == ValueStack::EmptyExceptionState) {
 409       assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
 410       continue;
 411     }
 412 
 413     int index;
 414     Value value;
 415     for_each_stack_value(s, index, value) {
 416       assert(value->subst() == value, "missed substitution");
 417       if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
 418         walk(value);
 419         assert(value->operand()->is_valid(), "must be evaluated now");
 420       }
 421     }
 422 
 423     int bci = s->bci();
 424     IRScope* scope = s->scope();
 425     ciMethod* method = scope->method();


 436     if (!liveness.is_valid()) {
 437       // Degenerate or breakpointed method.
 438       bailout("Degenerate or breakpointed method");
 439     } else {
 440       assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
 441       for_each_local_value(s, index, value) {
 442         assert(value->subst() == value, "missed substition");
 443         if (liveness.at(index) && !value->type()->is_illegal()) {
 444           if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
 445             walk(value);
 446             assert(value->operand()->is_valid(), "must be evaluated now");
 447           }
 448         } else {
 449           // NULL out this local so that linear scan can assume that all non-NULL values are live.
 450           s->invalidate_local(index);
 451         }
 452       }
 453     }
 454   }
 455 
 456   return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers());
 457 }
 458 
 459 
 460 CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
       // Convenience overload: build debug info for x using its exception
       // state, delegating to the three-argument form.
 461   return state_for(x, x->exception_state());
 462 }
 463 
 464 
 465 void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info) {
       // Materialize metadata `obj` in register `r`. If the class is not yet
       // loaded (or PatchALot forces patching for testing), emit a patchable
       // load to be resolved at runtime; `info` carries the debug state for
       // that patch site.
 466   if (!obj->is_loaded() || PatchALot) {
 467     assert(info != NULL, "info must be set if class is not loaded");
 468     __ klass2reg_patch(NULL, r, info);
 469   } else {
 470     // no patching needed
 471     __ metadata2reg(obj->constant_encoding(), r);
 472   }
 473 }
 474 
 475 
 476 void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,


1775   } else if (x->needs_null_check()) {
1776     NullCheck* nc = x->explicit_null_check();
1777     if (nc == NULL) {
1778       info = state_for(x);
1779     } else {
1780       info = state_for(nc);
1781     }
1782   }
1783 
1784   LIRItem object(x->obj(), this);
1785 
1786   object.load_item();
1787 
1788 #ifndef PRODUCT
1789   if (PrintNotLoaded && needs_patching) {
1790     tty->print_cr("   ###class not loaded at load_%s bci %d",
1791                   x->is_static() ?  "static" : "field", x->printable_bci());
1792   }
1793 #endif
1794 

1795   if (x->needs_null_check() &&
1796       (needs_patching ||
1797        MacroAssembler::needs_explicit_null_check(x->offset()))) {






1798     // emit an explicit null check because the offset is too large
1799     __ null_check(object.result(), new CodeEmitInfo(info));
1800   }
1801 
1802   LIR_Opr reg = rlock_result(x, field_type);
1803   LIR_Address* address;
1804   if (needs_patching) {
1805     // we need to patch the offset in the instruction so don't allow
1806     // generate_address to try to be smart about emitting the -1.
1807     // Otherwise the patching code won't know how to find the
1808     // instruction to patch.
1809     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1810   } else {
1811     address = generate_address(object.result(), x->offset(), field_type);
1812   }
1813 
1814   if (is_volatile && !needs_patching) {
1815     volatile_field_load(address, reg, info);
1816   } else {
1817     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1818     __ load(address, reg, info, patch_code);
1819   }


1844     if (index.result()->is_constant()) {
1845       cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
1846       __ branch(lir_cond_belowEqual, T_INT, stub);
1847     } else {
1848       cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
1849                   java_nio_Buffer::limit_offset(), T_INT, info);
1850       __ branch(lir_cond_aboveEqual, T_INT, stub);
1851     }
1852     __ move(index.result(), result);
1853   } else {
1854     // Just load the index into the result register
1855     __ move(index.result(), result);
1856   }
1857 }
1858 
1859 
1860 //------------------------array access--------------------------------------
1861 
1862 
 1863 void LIRGenerator::do_ArrayLength(ArrayLength* x) {
       // Load the array's length field into the result register.


 1864   LIRItem array(x->array(), this);
 1865   array.load_item();
 1866   LIR_Opr reg = rlock_result(x);
 1867 
 1868   CodeEmitInfo* info = NULL;
 1869   if (x->needs_null_check()) {
        // prefer the state of an explicit NullCheck created by the front end;
        // otherwise use the state of x itself
 1870     NullCheck* nc = x->explicit_null_check();
 1871     if (nc == NULL) {
 1872       info = state_for(x);
 1873     } else {
 1874       info = state_for(nc);
 1875     }




 1876   }
        // non-NULL info presumably lets this load double as the implicit
        // null check — TODO confirm against LIR_Assembler
 1877   __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
 1878 }
1879 
1880 
 1881 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
       // Emit an array element load: optional null check, optional range
       // check (against a pre-loaded length when available), then the load.
 1882   bool use_length = x->length() != NULL;
 1883   LIRItem array(x->array(), this);
 1884   LIRItem index(x->index(), this);
 1885   LIRItem length(this);
 1886   bool needs_range_check = true;
 1887 
 1888   if (use_length) {
        // a known length value may prove the index in range; only load the
        // length if a check is still required
 1889     needs_range_check = x->compute_needs_range_check();
 1890     if (needs_range_check) {
 1891       length.set_instruction(x->length());
 1892       length.load_item();
 1893     }
 1894   }
 1895 
 1896   array.load_item();
 1897   if (index.is_constant() && can_inline_as_constant(x->index())) {
 1898     // let it be a constant
 1899     index.dont_load_item();
 1900   } else {
 1901     index.load_item();
 1902   }
 1903 
 1904   CodeEmitInfo* range_check_info = state_for(x);
 1905   CodeEmitInfo* null_check_info = NULL;
 1906   if (x->needs_null_check()) {
        // reuse an explicit NullCheck's state when present, otherwise share
        // the range-check state
 1907     NullCheck* nc = x->explicit_null_check();
 1908     if (nc != NULL) {
 1909       null_check_info = state_for(nc);
 1910     } else {
 1911       null_check_info = range_check_info;
 1912     }




 1913   }
 1914 
 1915   // emit array address setup early so it schedules better
 1916   LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
 1917 
 1918   if (GenerateRangeChecks && needs_range_check) {
 1919     if (use_length) {


 1920       // TODO: use a (modified) version of array_range_check that does not require a
 1921       //       constant length to be loaded to a register
            // unsigned compare: index >= length (belowEqual on length vs index)
            // branches to the deopt/throw stub
 1922       __ cmp(lir_cond_belowEqual, length.result(), index.result());
 1923       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
 1924     } else {
 1925       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
 1926       // The range check performs the null check, so clear it out for the load
 1927       null_check_info = NULL;
 1928     }
 1929   }
 1930 
       // null_check_info (if still set) attaches the implicit null check to
       // the load itself
 1931   __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
 1932 }
1933 
1934 
1935 void LIRGenerator::do_NullCheck(NullCheck* x) {
1936   if (x->can_trap()) {
1937     LIRItem value(x->obj(), this);
1938     value.load_item();
1939     CodeEmitInfo* info = state_for(x);


2617     args->append(meth);
2618     call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
2619   }
2620 
2621   if (method()->is_synchronized()) {
2622     LIR_Opr obj;
2623     if (method()->is_static()) {
2624       obj = new_register(T_OBJECT);
2625       __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2626     } else {
2627       Local* receiver = x->state()->local_at(0)->as_Local();
2628       assert(receiver != NULL, "must already exist");
2629       obj = receiver->operand();
2630     }
2631     assert(obj->is_valid(), "must be valid");
2632 
2633     if (method()->is_synchronized() && GenerateSynchronizationCode) {
2634       LIR_Opr lock = new_register(T_INT);
2635       __ load_stack_address_monitor(0, lock);
2636 
2637       CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
2638       CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2639 
2640       // receiver is guaranteed non-NULL so don't need CodeEmitInfo
2641       __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
2642     }
2643   }
2644 
2645   // increment invocation counters if needed
2646   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2647     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
2648     increment_invocation_counter(info);
2649   }
2650 
2651   // all blocks with a successor must end with an unconditional jump
2652   // to the successor even if they are consecutive
2653   __ jump(x->default_sux());
2654 }
2655 
2656 
 2657 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
 2658   // construct our frame and model the production of incoming pointer
 2659   // to the OSR buffer.
 2660   __ osr_entry(LIR_Assembler::osrBufferPointer());
       // the OSR buffer pointer becomes the instruction's result
 2661   LIR_Opr result = rlock_result(x);
 2662   __ move(LIR_Assembler::osrBufferPointer(), result);
 2663 }
2664 
2665 
2666 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2667   assert(args->length() == arg_list->length(),


3084   if (x->pass_thread()) {
3085     signature->append(T_ADDRESS);
3086     args->append(getThreadPointer());
3087   }
3088 
3089   for (int i = 0; i < x->number_of_arguments(); i++) {
3090     Value a = x->argument_at(i);
3091     LIRItem* item = new LIRItem(a, this);
3092     item->load_item();
3093     args->append(item->result());
3094     signature->append(as_BasicType(a->type()));
3095   }
3096 
3097   LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
3098   if (x->type() == voidType) {
3099     set_no_result(x);
3100   } else {
3101     __ move(result, rlock_result(x));
3102   }
3103 }


























































































3104 
 3105 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
       // One-argument convenience wrapper: build the signature and argument
       // list for `arg1` and delegate to the general call_runtime.
 3106   LIRItemList args(1);
 3107   LIRItem value(arg1, this);
 3108   args.append(&value);
 3109   BasicTypeList signature;
 3110   signature.append(as_BasicType(arg1->type()));
 3111 
 3112   return call_runtime(&signature, &args, entry, result_type, info);
 3113 }
3114 
3115 
3116 LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
3117   LIRItemList args(2);
3118   LIRItem value1(arg1, this);
3119   LIRItem value2(arg2, this);
3120   args.append(&value1);
3121   args.append(&value2);
3122   BasicTypeList signature;
3123   signature.append(as_BasicType(arg1->type()));




 386 }
 387 
 388 
 389 // This is called for each node in tree; the walk stops if a root is reached
 390 void LIRGenerator::walk(Value instr) {
 391   InstructionMark im(compilation(), instr);
 392   //stop walk when encounter a root
       // NOTE: && binds tighter than ||, so this condition reads as
       // (pinned AND not a Phi) OR (operand already assigned, i.e. visited).
 393   if (instr->is_pinned() && instr->as_Phi() == NULL || instr->operand()->is_valid()) {
 394     assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
 395   } else {
       // not a root and not yet visited: dispatch into this generator to
       // emit LIR for the value now
 396     assert(instr->subst() == instr, "shouldn't have missed substitution");
 397     instr->visit(this);
 398     // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
 399   }
 400 }
 401 
 402 
 403 CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
 404   assert(state != NULL, "state must be defined");
 405 
 406 #ifndef PRODUCT
 407   state->verify();
 408 #endif
 409 
 410   ValueStack* s = state;
 411   for_each_state(s) {
 412     if (s->kind() == ValueStack::EmptyExceptionState) {
 413       assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
 414       continue;
 415     }
 416 
 417     int index;
 418     Value value;
 419     for_each_stack_value(s, index, value) {
 420       assert(value->subst() == value, "missed substitution");
 421       if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
 422         walk(value);
 423         assert(value->operand()->is_valid(), "must be evaluated now");
 424       }
 425     }
 426 
 427     int bci = s->bci();
 428     IRScope* scope = s->scope();
 429     ciMethod* method = scope->method();


 440     if (!liveness.is_valid()) {
 441       // Degenerate or breakpointed method.
 442       bailout("Degenerate or breakpointed method");
 443     } else {
 444       assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
 445       for_each_local_value(s, index, value) {
 446         assert(value->subst() == value, "missed substition");
 447         if (liveness.at(index) && !value->type()->is_illegal()) {
 448           if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
 449             walk(value);
 450             assert(value->operand()->is_valid(), "must be evaluated now");
 451           }
 452         } else {
 453           // NULL out this local so that linear scan can assume that all non-NULL values are live.
 454           s->invalidate_local(index);
 455         }
 456       }
 457     }
 458   }
 459 
 460   return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
 461 }
 462 
 463 
 464 CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
       // Convenience overload: build debug info for x using its exception
       // state, delegating to the three-argument form.
 465   return state_for(x, x->exception_state());
 466 }
 467 
 468 
 469 void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info) {
       // Materialize metadata `obj` in register `r`. If the class is not yet
       // loaded (or PatchALot forces patching for testing), emit a patchable
       // load to be resolved at runtime; `info` carries the debug state for
       // that patch site.
 470   if (!obj->is_loaded() || PatchALot) {
 471     assert(info != NULL, "info must be set if class is not loaded");
 472     __ klass2reg_patch(NULL, r, info);
 473   } else {
 474     // no patching needed
 475     __ metadata2reg(obj->constant_encoding(), r);
 476   }
 477 }
 478 
 479 
 480 void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,


1779   } else if (x->needs_null_check()) {
1780     NullCheck* nc = x->explicit_null_check();
1781     if (nc == NULL) {
1782       info = state_for(x);
1783     } else {
1784       info = state_for(nc);
1785     }
1786   }
1787 
1788   LIRItem object(x->obj(), this);
1789 
1790   object.load_item();
1791 
1792 #ifndef PRODUCT
1793   if (PrintNotLoaded && needs_patching) {
1794     tty->print_cr("   ###class not loaded at load_%s bci %d",
1795                   x->is_static() ?  "static" : "field", x->printable_bci());
1796   }
1797 #endif
1798   
1799   bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1800   if (x->needs_null_check() &&
1801       (needs_patching ||
1802        MacroAssembler::needs_explicit_null_check(x->offset()) ||
1803        stress_deopt)) {
1804     LIR_Opr obj = object.result();
1805     if (stress_deopt) {
1806       obj = new_register(T_OBJECT);
1807       __ move(LIR_OprFact::oopConst(NULL), obj);
1808     }
1809     // emit an explicit null check because the offset is too large
1810     __ null_check(obj, new CodeEmitInfo(info));
1811   }
1812 
1813   LIR_Opr reg = rlock_result(x, field_type);
1814   LIR_Address* address;
1815   if (needs_patching) {
1816     // we need to patch the offset in the instruction so don't allow
1817     // generate_address to try to be smart about emitting the -1.
1818     // Otherwise the patching code won't know how to find the
1819     // instruction to patch.
1820     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1821   } else {
1822     address = generate_address(object.result(), x->offset(), field_type);
1823   }
1824 
1825   if (is_volatile && !needs_patching) {
1826     volatile_field_load(address, reg, info);
1827   } else {
1828     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1829     __ load(address, reg, info, patch_code);
1830   }


1855     if (index.result()->is_constant()) {
1856       cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
1857       __ branch(lir_cond_belowEqual, T_INT, stub);
1858     } else {
1859       cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
1860                   java_nio_Buffer::limit_offset(), T_INT, info);
1861       __ branch(lir_cond_aboveEqual, T_INT, stub);
1862     }
1863     __ move(index.result(), result);
1864   } else {
1865     // Just load the index into the result register
1866     __ move(index.result(), result);
1867   }
1868 }
1869 
1870 
1871 //------------------------array access--------------------------------------
1872 
1873 
 1874 void LIRGenerator::do_ArrayLength(ArrayLength* x) {
       // Load the array's length field into the result register.
       // New in this rev: skip entirely when the result is unused and the
       // instruction cannot trap (no null check needed).
 1875   if (x->use_count() == 0 && !x->can_trap()) return;
 1876 
 1877   LIRItem array(x->array(), this);
 1878   array.load_item();
 1879   LIR_Opr reg = rlock_result(x);
 1880 
 1881   CodeEmitInfo* info = NULL;
 1882   if (x->needs_null_check()) {
        // prefer the state of an explicit NullCheck created by the front end;
        // otherwise use the state of x itself
 1883     NullCheck* nc = x->explicit_null_check();
 1884     if (nc == NULL) {
 1885       info = state_for(x);
 1886     } else {
 1887       info = state_for(nc);
 1888     }
        // stress mode: force a guaranteed-failing null check (null constant
        // in a fresh register) to exercise the deoptimize-on-exception path
 1889     if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
 1890       LIR_Opr obj = new_register(T_OBJECT);
 1891       __ move(LIR_OprFact::oopConst(NULL), obj);
 1892       __ null_check(obj, new CodeEmitInfo(info));
 1893     }
 1894   }
       // non-NULL info presumably lets this load double as the implicit
       // null check — TODO confirm against LIR_Assembler
 1895   __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
 1896 }
1897 
1898 
 1899 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
       // Emit an array element load: optional null check, optional range
       // check, then the load. New in this rev: compute_needs_range_check()
       // is consulted unconditionally (bound-check elimination), and
       // StressLoopInvariantCodeMotion can force deopting checks.
 1900   bool use_length = x->length() != NULL;
 1901   LIRItem array(x->array(), this);
 1902   LIRItem index(x->index(), this);
 1903   LIRItem length(this);
 1904   bool needs_range_check = x->compute_needs_range_check();
 1905 
 1906   if (use_length && needs_range_check) {


        // only load the length value when a check is still required
 1907     length.set_instruction(x->length());
 1908     length.load_item();
 1909   }

 1910 
 1911   array.load_item();
 1912   if (index.is_constant() && can_inline_as_constant(x->index())) {
 1913     // let it be a constant
 1914     index.dont_load_item();
 1915   } else {
 1916     index.load_item();
 1917   }
 1918 
 1919   CodeEmitInfo* range_check_info = state_for(x);
 1920   CodeEmitInfo* null_check_info = NULL;
 1921   if (x->needs_null_check()) {
        // reuse an explicit NullCheck's state when present, otherwise share
        // the range-check state
 1922     NullCheck* nc = x->explicit_null_check();
 1923     if (nc != NULL) {
 1924       null_check_info = state_for(nc);
 1925     } else {
 1926       null_check_info = range_check_info;
 1927     }
        // stress mode: force a guaranteed-failing null check to exercise
        // the deoptimize-on-exception path
 1928     if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
 1929       LIR_Opr obj = new_register(T_OBJECT);
 1930       __ move(LIR_OprFact::oopConst(NULL), obj);
 1931       __ null_check(obj, new CodeEmitInfo(null_check_info));
 1932     }
 1933   }
 1934 
 1935   // emit array address setup early so it schedules better
 1936   LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
 1937 
 1938   if (GenerateRangeChecks && needs_range_check) {
        // stress mode: unconditionally branch to the range-check stub
 1939     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
 1940       __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
 1941     } else if (use_length) {
 1942       // TODO: use a (modified) version of array_range_check that does not require a
 1943       //       constant length to be loaded to a register
 1944       __ cmp(lir_cond_belowEqual, length.result(), index.result());
 1945       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
 1946     } else {
 1947       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
 1948       // The range check performs the null check, so clear it out for the load
 1949       null_check_info = NULL;
 1950     }
 1951   }
 1952 
       // null_check_info (if still set) attaches the implicit null check to
       // the load itself
 1953   __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
 1954 }
1955 
1956 
1957 void LIRGenerator::do_NullCheck(NullCheck* x) {
1958   if (x->can_trap()) {
1959     LIRItem value(x->obj(), this);
1960     value.load_item();
1961     CodeEmitInfo* info = state_for(x);


2639     args->append(meth);
2640     call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
2641   }
2642 
2643   if (method()->is_synchronized()) {
2644     LIR_Opr obj;
2645     if (method()->is_static()) {
2646       obj = new_register(T_OBJECT);
2647       __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2648     } else {
2649       Local* receiver = x->state()->local_at(0)->as_Local();
2650       assert(receiver != NULL, "must already exist");
2651       obj = receiver->operand();
2652     }
2653     assert(obj->is_valid(), "must be valid");
2654 
2655     if (method()->is_synchronized() && GenerateSynchronizationCode) {
2656       LIR_Opr lock = new_register(T_INT);
2657       __ load_stack_address_monitor(0, lock);
2658 
2659       CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
2660       CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2661 
2662       // receiver is guaranteed non-NULL so don't need CodeEmitInfo
2663       __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
2664     }
2665   }
2666 
2667   // increment invocation counters if needed
2668   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2669     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
2670     increment_invocation_counter(info);
2671   }
2672 
2673   // all blocks with a successor must end with an unconditional jump
2674   // to the successor even if they are consecutive
2675   __ jump(x->default_sux());
2676 }
2677 
2678 
 2679 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
 2680   // construct our frame and model the production of incoming pointer
 2681   // to the OSR buffer.
 2682   __ osr_entry(LIR_Assembler::osrBufferPointer());
       // the OSR buffer pointer becomes the instruction's result
 2683   LIR_Opr result = rlock_result(x);
 2684   __ move(LIR_Assembler::osrBufferPointer(), result);
 2685 }
2686 
2687 
2688 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2689   assert(args->length() == arg_list->length(),


3106   if (x->pass_thread()) {
3107     signature->append(T_ADDRESS);
3108     args->append(getThreadPointer());
3109   }
3110 
3111   for (int i = 0; i < x->number_of_arguments(); i++) {
3112     Value a = x->argument_at(i);
3113     LIRItem* item = new LIRItem(a, this);
3114     item->load_item();
3115     args->append(item->result());
3116     signature->append(as_BasicType(a->type()));
3117   }
3118 
3119   LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
3120   if (x->type() == voidType) {
3121     set_no_result(x);
3122   } else {
3123     __ move(result, rlock_result(x));
3124   }
3125 }
3126 
 3127 void LIRGenerator::do_Assert(Assert *x) {
       // Emit a lir_assert check for an Assert node (debug builds only —
       // the body is compiled out in PRODUCT, leaving this a no-op).
 3128 #ifndef PRODUCT
 3129   ValueTag tag = x->x()->type()->tag();
 3130   If::Condition cond = x->cond();
 3131 
 3132   LIRItem xitem(x->x(), this);
 3133   LIRItem yitem(x->y(), this);
 3134   LIRItem* xin = &xitem;
 3135   LIRItem* yin = &yitem;
 3136 
 3137   assert(tag == intTag, "Only integer assertions are valid!");
 3138 
       // left operand must be in a register; right may stay a constant
 3139   xin->load_item();
 3140   yin->dont_load_item();
 3141 
 3142   set_no_result(x);
 3143 
 3144   LIR_Opr left = xin->result();
 3145   LIR_Opr right = yin->result();
 3146 
 3147   __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
 3148 
 3149 #endif
 3150 }
3151 
3152 
 3153 void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
       // Emit a deoptimization predicate (inserted by range-check
       // elimination). Three cases:
       //  1. no operands (or StressRangeCheckElimination): unconditional
       //     jump to a PredicateFailedStub;
       //  2. both operands are int constants: fold the condition at compile
       //     time and emit the jump only if the condition holds;
       //  3. otherwise: emit a compare-and-branch to the stub.
       // In every case a taken branch reaches PredicateFailedStub, which
       // deopts using x's state.
 3154 
 3155 
 3156   Instruction *a = x->x();
 3157   Instruction *b = x->y();
 3158   if (!a || StressRangeCheckElimination) {
 3159     assert(!b || StressRangeCheckElimination, "B must also be null");
 3160 
 3161     CodeEmitInfo *info = state_for(x, x->state());
 3162     CodeStub* stub = new PredicateFailedStub(info);
 3163 
 3164     __ jump(stub);
 3165   } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
 3166     int a_int = a->type()->as_IntConstant()->value();
 3167     int b_int = b->type()->as_IntConstant()->value();
 3168 
 3169     bool ok = false;
 3170 
        // aeq/beq compare as unsigned ("above or equal" / "below or equal")
 3171     switch(x->cond()) {
 3172       case Instruction::eql: ok = (a_int == b_int); break;
 3173       case Instruction::neq: ok = (a_int != b_int); break;
 3174       case Instruction::lss: ok = (a_int < b_int); break;
 3175       case Instruction::leq: ok = (a_int <= b_int); break;
 3176       case Instruction::gtr: ok = (a_int > b_int); break;
 3177       case Instruction::geq: ok = (a_int >= b_int); break;
 3178       case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
 3179       case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
 3180       default: ShouldNotReachHere();
 3181     }
 3182 
        // condition statically holds: predicate always fails, emit an
        // unconditional deopt jump
 3183     if (ok) {
 3184 
 3185       CodeEmitInfo *info = state_for(x, x->state());
 3186       CodeStub* stub = new PredicateFailedStub(info);
 3187 
 3188       __ jump(stub);
 3189     }
 3190   } else {
 3191 
 3192     ValueTag tag = x->x()->type()->tag();
 3193     If::Condition cond = x->cond();
 3194     LIRItem xitem(x->x(), this);
 3195     LIRItem yitem(x->y(), this);
 3196     LIRItem* xin = &xitem;
 3197     LIRItem* yin = &yitem;
 3198 
 3199     assert(tag == intTag, "Only integer deoptimizations are valid!");
 3200 
        // left operand in a register; right may stay a constant
 3201     xin->load_item();
 3202     yin->dont_load_item();
 3203     set_no_result(x);
 3204 
 3205     LIR_Opr left = xin->result();
 3206     LIR_Opr right = yin->result();
 3207 
 3208     CodeEmitInfo *info = state_for(x, x->state());
 3209     CodeStub* stub = new PredicateFailedStub(info);
 3210 
        // branch to the deopt stub when the predicate condition holds
 3211     __ cmp(lir_cond(cond), left, right);
 3212     __ branch(lir_cond(cond), right->type(), stub);
 3213   }
 3214 }
3215 
3216 
 3217 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
       // One-argument convenience wrapper: build the signature and argument
       // list for `arg1` and delegate to the general call_runtime.
 3218   LIRItemList args(1);
 3219   LIRItem value(arg1, this);
 3220   args.append(&value);
 3221   BasicTypeList signature;
 3222   signature.append(as_BasicType(arg1->type()));
 3223 
 3224   return call_runtime(&signature, &args, entry, result_type, info);
 3225 }
3226 
3227 
3228 LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
3229   LIRItemList args(2);
3230   LIRItem value1(arg1, this);
3231   LIRItem value2(arg2, this);
3232   args.append(&value1);
3233   args.append(&value2);
3234   BasicTypeList signature;
3235   signature.append(as_BasicType(arg1->type()));