src/cpu/x86/vm/c1_LIRGenerator_x86.cpp (old version, before the Shenandoah barrier changes)

 283     length.load_item();
 284 
 285   }
 286   if (needs_store_check) {
 287     value.load_item();
 288   } else {
 289     value.load_for_store(x->elt_type());
 290   }
 291 
 292   set_no_result(x);
 293 
 294   // the CodeEmitInfo must be duplicated for each different
 295   // LIR-instruction because spilling can occur anywhere between two
 296   // instructions and so the debug information must be different
 297   CodeEmitInfo* range_check_info = state_for(x);
 298   CodeEmitInfo* null_check_info = NULL;
 299   if (x->needs_null_check()) {
 300     null_check_info = new CodeEmitInfo(range_check_info);
 301   }
 302 
 303   // emit array address setup early so it schedules better
 304   LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
 305 
 306   if (GenerateRangeChecks && needs_range_check) {
 307     if (use_length) {
 308       __ cmp(lir_cond_belowEqual, length.result(), index.result());
 309       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
 310     } else {
 311       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
 312       // range_check also does the null check
 313       null_check_info = NULL;
 314     }
 315   }
 316 
 317   if (GenerateArrayStoreCheck && needs_store_check) {
 318     LIR_Opr tmp1 = new_register(objectType);
 319     LIR_Opr tmp2 = new_register(objectType);
 320     LIR_Opr tmp3 = new_register(objectType);
 321 
 322     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
 323     __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
 324   }
 325 
 326   if (obj_store) {
 327     // Needs GC write barriers.
 328     pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
 329                 true /* do_load */, false /* patch */, NULL);
 330     __ move(value.result(), array_addr, null_check_info);
 331     // Seems to be a precise address
 332     post_barrier(LIR_OprFact::address(array_addr), value.result());
 333   } else {
 334     __ move(value.result(), array_addr, null_check_info);
 335   }
 336 }
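
Note on the pre_barrier/post_barrier pair above: it is C1's generic GC write-barrier hook. With a generational collector the post-barrier dirties the card covering the updated slot so the next GC rescans it. A toy, standalone sketch of that card-marking step (illustrative only; the table size and slot value are made up, not HotSpot's):

    #include <cstdint>
    #include <cstdio>

    static uint8_t card_table[1u << 20];   // one byte per 512-byte card
    static const int card_shift = 9;       // 512-byte cards, as in HotSpot

    static void toy_post_barrier(uintptr_t slot) {
      // dirty the card covering the stored-to slot (0 == dirty in HotSpot)
      card_table[(slot >> card_shift) % (1u << 20)] = 0;
    }

    int main() {
      toy_post_barrier(0x12345678);
      printf("card %lu dirtied\n", (unsigned long)(0x12345678u >> card_shift));
      return 0;
    }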
 337 
 338 
 339 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 340   assert(x->is_pinned(),"");
 341   LIRItem obj(x->obj(), this);
 342   obj.load_item();
 343 
 344   set_no_result(x);
 345 
 346   // "lock" stores the address of the monitor stack slot, so this is not an oop
 347   LIR_Opr lock = new_register(T_INT);
 348   // Need a scratch register for biased locking on x86
 349   LIR_Opr scratch = LIR_OprFact::illegalOpr;
 350   if (UseBiasedLocking) {
 351     scratch = new_register(T_INT);
 352   }
 353 
 354   CodeEmitInfo* info_for_exception = NULL;
 355   if (x->needs_null_check()) {
 356     info_for_exception = state_for(x);
 357   }
 358   // this CodeEmitInfo must not have the xhandlers because here the
 359   // object is already locked (xhandlers expect object to be unlocked)
 360   CodeEmitInfo* info = state_for(x, x->state(), true);
 361   monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
 362                         x->monitor_no(), info_for_exception, info);
 363 }
 364 
 365 
 366 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
 367   assert(x->is_pinned(),"");
 368 
 369   LIRItem obj(x->obj(), this);
 370   obj.dont_load_item();
 371 
 372   LIR_Opr lock = new_register(T_INT);
 373   LIR_Opr obj_temp = new_register(T_INT);
 374   set_no_result(x);
 375   monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
 376 }
 377 
 378 
 379 // _ineg, _lneg, _fneg, _dneg
 380 void LIRGenerator::do_NegateOp(NegateOp* x) {
 381   LIRItem value(x->x(), this);
 733 
 734   // get address of field
 735   obj.load_item();
 736   offset.load_nonconstant();
 737 
 738   if (type == objectType) {
 739     cmp.load_item_force(FrameMap::rax_oop_opr);
 740     val.load_item();
 741   } else if (type == intType) {
 742     cmp.load_item_force(FrameMap::rax_opr);
 743     val.load_item();
 744   } else if (type == longType) {
 745     cmp.load_item_force(FrameMap::long0_opr);
 746     val.load_item_force(FrameMap::long1_opr);
 747   } else {
 748     ShouldNotReachHere();
 749   }
 750 
 751   LIR_Opr addr = new_pointer_register();
 752   LIR_Address* a;
 753   if (offset.result()->is_constant()) {
 754 #ifdef _LP64
 755     jlong c = offset.result()->as_jlong();
 756     if ((jlong)((jint)c) == c) {
 757       a = new LIR_Address(obj.result(),
 758                           (jint)c,
 759                           as_BasicType(type));
 760     } else {
 761       LIR_Opr tmp = new_register(T_LONG);
 762       __ move(offset.result(), tmp);
 763       a = new LIR_Address(obj.result(),
 764                           tmp,
 765                           as_BasicType(type));
 766     }
 767 #else
 768     a = new LIR_Address(obj.result(),
 769                         offset.result()->as_jint(),
 770                         as_BasicType(type));
 771 #endif
 772   } else {
 773     a = new LIR_Address(obj.result(),
 774                         offset.result(),
 775                         LIR_Address::times_1,
 776                         0,
 777                         as_BasicType(type));
 778   }
 779   __ leal(LIR_OprFact::address(a), addr);
 780 
 781   if (type == objectType) {  // Write-barrier needed for Object fields.
 782     // Do the pre-write barrier, if any.
 783     pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
 784                 true /* do_load */, false /* patch */, NULL);
 785   }
 786 
 787   LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
 788   if (type == objectType)
 789     __ cas_obj(addr, cmp.result(), val.result(), ill, ill);
 790   else if (type == intType)
 791     __ cas_int(addr, cmp.result(), val.result(), ill, ill);
 792   else if (type == longType)
 793     __ cas_long(addr, cmp.result(), val.result(), ill, ill);
 794   else {
 795     ShouldNotReachHere();
 796   }
 797 
 798   // generate conditional move of boolean result
 799   LIR_Opr result = rlock_result(x);
 800   __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
 801            result, as_BasicType(type));
 802   if (type == objectType) {   // Write-barrier needed for Object fields.
 803     // Seems to be a precise address
 804     post_barrier(addr, val.result());
 805   }
 806 }
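
For context: the LP64 branch above folds the constant offset into the address only when (jlong)((jint)c) == c, i.e. when the value survives truncation to 32 bits, because an x86-64 addressing mode carries at most a signed 32-bit displacement; anything wider is materialized in a temp register first. The idiom in isolation (standalone sketch, not HotSpot code):

    #include <cstdint>
    #include <cstdio>

    static bool fits_in_jint(int64_t c) {
      return (int64_t)(int32_t)c == c;   // survives the 64->32->64 round trip?
    }

    int main() {
      printf("%d\n", fits_in_jint(0x7fffffffLL));   // 1: folds into the address
      printf("%d\n", fits_in_jint(0x80000000LL));   // 0: needs a temp register
      return 0;
    }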
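The cas_obj/cas_int/cas_long instructions leave their outcome in the flags, and the cmove afterwards turns that into the Java boolean result. The net effect is ordinary compare-and-swap semantics; in portable C++ terms (an analogy, not the generated code):

    #include <atomic>
    #include <cstdio>

    int main() {
      std::atomic<int> field{42};
      int expected = 42;
      // cas + cmove(equal, 1, 0) corresponds to this bool result
      bool swapped = field.compare_exchange_strong(expected, 7);
      printf("swapped=%d value=%d\n", swapped, (int)field.load());  // swapped=1 value=7
      return 0;
    }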
 807 
 808 
 809 void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
 810   assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow), "wrong type");
 811   LIRItem value(x->argument_at(0), this);
 812 
 813   bool use_fpu = false;
 814   if (UseSSE >= 2) {
 815     switch(x->id()) {
 816       case vmIntrinsics::_dsin:
 817       case vmIntrinsics::_dcos:
 818       case vmIntrinsics::_dtan:
 819       case vmIntrinsics::_dlog:
 820       case vmIntrinsics::_dlog10:
 821       case vmIntrinsics::_dexp:
 822       case vmIntrinsics::_dpow:
 823         use_fpu = true;
 824     }
 876   }
 877 
 878   if (use_fpu) {
 879     __ move(calc_result, x->operand());
 880   }
 881 }
 882 
 883 
 884 void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
 885   assert(x->number_of_arguments() == 5, "wrong type");
 886 
 887   // Make all state_for calls early since they can emit code
 888   CodeEmitInfo* info = state_for(x, x->state());
 889 
 890   LIRItem src(x->argument_at(0), this);
 891   LIRItem src_pos(x->argument_at(1), this);
 892   LIRItem dst(x->argument_at(2), this);
 893   LIRItem dst_pos(x->argument_at(3), this);
 894   LIRItem length(x->argument_at(4), this);
 895 
 896   // operands for arraycopy must use fixed registers, otherwise
 897   // LinearScan will fail allocation (because arraycopy always needs a
 898   // call)
 899 
 900 #ifndef _LP64
 901   src.load_item_force     (FrameMap::rcx_oop_opr);
 902   src_pos.load_item_force (FrameMap::rdx_opr);
 903   dst.load_item_force     (FrameMap::rax_oop_opr);
 904   dst_pos.load_item_force (FrameMap::rbx_opr);
 905   length.load_item_force  (FrameMap::rdi_opr);
 906   LIR_Opr tmp =           (FrameMap::rsi_opr);
 907 #else
 908 
 909   // The java calling convention will give us enough registers
 910   // so that on the stub side the args will be perfect already.
 911   // On the other slow/special case side we call C and the arg
 912   // positions are not similar enough to pick one as the best.
 913   // Also because the java calling convention is a "shifted" version
 914   // of the C convention we can process the java args trivially into C
 915   // args without worry of overwriting during the xfer
 916 
 917   src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
 918   src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
 919   dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
 920   dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
 921   length.load_item_force  (FrameMap::as_opr(j_rarg4));
 922 
 923   LIR_Opr tmp =           FrameMap::as_opr(j_rarg5);
 924 #endif // LP64
 925 
 926   set_no_result(x);
 927 
 928   int flags;
 929   ciArrayKlass* expected_type;
 930   arraycopy_helper(x, &flags, &expected_type);
 931 
 932   __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
 933 }
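
Once the checks encoded in flags pass, the arraycopy stub reduces to an overlap-safe element copy of the payload, i.e. memmove semantics (standalone illustration of that contract, not the stub itself):

    #include <cstring>
    #include <cstdio>

    int main() {
      int a[6] = {0, 1, 2, 3, 4, 5};
      // the equivalent of System.arraycopy(a, 0, a, 1, 5) on overlapping ranges
      memmove(&a[1], &a[0], 5 * sizeof(int));
      for (int i = 0; i < 6; i++) printf("%d ", a[i]);   // 0 0 1 2 3 4
      printf("\n");
      return 0;
    }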
 934 
 935 void LIRGenerator::do_update_CRC32(Intrinsic* x) {
 936   assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
 937   // Make all state_for calls early since they can emit code
 938   LIR_Opr result = rlock_result(x);
 939   int flags = 0;
 940   switch (x->id()) {
 941     case vmIntrinsics::_updateCRC32: {
 942       LIRItem crc(x->argument_at(0), this);
 943       LIRItem val(x->argument_at(1), this);
 944       // val is destroyed by update_crc32
 945       val.set_destroys_register();
 946       crc.load_item();
 947       val.load_item();
 948       __ update_crc32(crc.result(), val.result(), result);
 949       break;
 950     }
 951     case vmIntrinsics::_updateBytesCRC32:
 952     case vmIntrinsics::_updateByteBufferCRC32: {
 963       int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
 964       if (off.result()->is_constant()) {
 965         index = LIR_OprFact::illegalOpr;
 966         offset += off.result()->as_jint();
 967       }
 968       LIR_Opr base_op = buf.result();
 969 
 970 #ifndef _LP64
 971       if (!is_updateBytes) { // long buf holds a raw address
 972          base_op = new_register(T_INT);
 973          __ convert(Bytecodes::_l2i, buf.result(), base_op);
 974       }
 975 #else
 976       if (index->is_valid()) {
 977         LIR_Opr tmp = new_register(T_LONG);
 978         __ convert(Bytecodes::_i2l, index, tmp);
 979         index = tmp;
 980       }
 981 #endif
 982 
 983       LIR_Address* a = new LIR_Address(base_op,
 984                                        index,
 985                                        LIR_Address::times_1,
 986                                        offset,
 987                                        T_BYTE);
 988       BasicTypeList signature(3);
 989       signature.append(T_INT);
 990       signature.append(T_ADDRESS);
 991       signature.append(T_INT);
 992       CallingConvention* cc = frame_map()->c_calling_convention(&signature);
 993       const LIR_Opr result_reg = result_register_for(x->type());
 994 
 995       LIR_Opr addr = new_pointer_register();
 996       __ leal(LIR_OprFact::address(a), addr);
 997 
 998       crc.load_item_force(cc->at(0));
 999       __ move(addr, cc->at(1));
1000       len.load_item_force(cc->at(2));
1001 
1002       __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
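
The stub signature built here, (T_INT, T_ADDRESS, T_INT), is the familiar (crc, buffer address, length) shape, and Java's CRC32 uses the same polynomial as zlib, so zlib's crc32() is a faithful model of the leaf call (a sketch assuming zlib is available; it is not what the stub executes):

    #include <zlib.h>
    #include <cstdio>

    int main() {
      const unsigned char data[] = "hello";
      uLong crc = crc32(0L, Z_NULL, 0);   // initial CRC value
      crc = crc32(crc, data, 5);          // same (crc, addr, len) argument shape
      printf("%08lx\n", crc);
      return 0;
    }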
1295   if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
1296     // inline long zero
1297     yin->dont_load_item();
1298   } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
1299     // longs cannot handle constants at right side
1300     yin->load_item();
1301   } else {
1302     yin->dont_load_item();
1303   }
1304 
1305   // add safepoint before generating condition code so it can be recomputed
1306   if (x->is_safepoint()) {
1307     // increment backedge counter if needed
1308     increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
1309     __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
1310   }
1311   set_no_result(x);
1312 
1313   LIR_Opr left = xin->result();
1314   LIR_Opr right = yin->result();
1315   __ cmp(lir_cond(cond), left, right);
1316   // Generate branch profiling. Profiling code doesn't kill flags.
1317   profile_branch(x, cond);
1318   move_to_phi(x->state());
1319   if (x->x()->type()->is_float_kind()) {
1320     __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
1321   } else {
1322     __ branch(lir_cond(cond), right->type(), x->tsux());
1323   }
1324   assert(x->default_sux() == x->fsux(), "wrong destination above");
1325   __ jump(x->default_sux());
1326 }
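
A note on the float path above: it passes a second branch target, x->usux(), because a float compare can come out "unordered" when either operand is NaN, and that case needs its own successor block. A short reminder of the underlying behavior (C++ ordered comparisons treat NaN the same way):

    #include <cmath>
    #include <cstdio>

    int main() {
      double qnan = std::nan("");
      // every ordered comparison against NaN is false
      printf("%d %d %d\n", qnan < 1.0, qnan > 1.0, qnan == qnan);   // 0 0 0
      return 0;
    }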
1327 
1328 
1329 LIR_Opr LIRGenerator::getThreadPointer() {
1330 #ifdef _LP64
1331   return FrameMap::as_pointer_opr(r15_thread);
1332 #else
1333   LIR_Opr result = new_register(T_INT);
1334   __ get_thread(result);
1374                               address->index(), address->scale(),
1375                               address->disp(), T_DOUBLE);
1376     // Transfer the value atomically by using FP moves.  This means
1377     // the value has to be moved between CPU and FPU registers.  In
1378     // SSE0 and SSE1 mode it has to be moved through spill slot but in
1379     // SSE2+ mode it can be moved directly.
1380     LIR_Opr temp_double = new_register(T_DOUBLE);
1381     __ volatile_move(LIR_OprFact::address(address), temp_double, T_LONG, info);
1382     __ volatile_move(temp_double, result, T_LONG);
1383     if (UseSSE < 2) {
1384       // below SSE2 there is no xmm->cpu register move, so the value must go through a spill slot
1385       set_vreg_flag(result, must_start_in_memory);
1386     }
1387   } else {
1388     __ load(address, result, info);
1389   }
1390 }
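
For context: on 32-bit x86 a plain 64-bit load is two 32-bit moves and can tear between them; bouncing the value through an FP/XMM register, as above, moves all 64 bits in one instruction. The portable statement of the same guarantee (an analogy; IA-32 compilers typically implement it with a similar FP or SSE move):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    int main() {
      std::atomic<int64_t> cell{0x1122334455667788LL};
      int64_t v = cell.load();   // one indivisible 64-bit access, no tearing
      printf("%016llx\n", (unsigned long long)v);
      return 0;
    }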
1391 
1392 void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
1393                                      BasicType type, bool is_volatile) {
1394   if (is_volatile && type == T_LONG) {
1395     LIR_Address* addr = new LIR_Address(src, offset, T_DOUBLE);
1396     LIR_Opr tmp = new_register(T_DOUBLE);
1397     __ load(addr, tmp);
1398     LIR_Opr spill = new_register(T_LONG);
1399     set_vreg_flag(spill, must_start_in_memory);
1400     __ move(tmp, spill);
1401     __ move(spill, dst);
1402   } else {
1403     LIR_Address* addr = new LIR_Address(src, offset, type);
1404     __ load(addr, dst);
1405   }
1406 }
1407 
1408 
1409 void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
1410                                      BasicType type, bool is_volatile) {
1411   if (is_volatile && type == T_LONG) {
1412     LIR_Address* addr = new LIR_Address(src, offset, T_DOUBLE);
1413     LIR_Opr tmp = new_register(T_DOUBLE);
1414     LIR_Opr spill = new_register(T_DOUBLE);
1415     set_vreg_flag(spill, must_start_in_memory);
1416     __ move(data, spill);
1417     __ move(spill, tmp);
1418     __ move(tmp, addr);
1419   } else {
1420     LIR_Address* addr = new LIR_Address(src, offset, type);
1421     bool is_obj = (type == T_ARRAY || type == T_OBJECT);
1422     if (is_obj) {
1423       // Do the pre-write barrier, if any.
1424       pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
1425                   true /* do_load */, false /* patch */, NULL);
1426       __ move(data, addr);
1427       assert(src->is_register(), "must be register");
1428       // Seems to be a precise address
1429       post_barrier(LIR_OprFact::address(addr), data);
1430     } else {
1431       __ move(data, addr);
1432     }
1433   }
1434 }
1435 
1436 void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
1437   BasicType type = x->basic_type();
1438   LIRItem src(x->object(), this);
1439   LIRItem off(x->offset(), this);
1440   LIRItem value(x->value(), this);
1441 
1442   src.load_item();
1443   value.load_item();
1444   off.load_nonconstant();
1445 
1446   LIR_Opr dst = rlock_result(x, type);
1447   LIR_Opr data = value.result();
1448   bool is_obj = (type == T_ARRAY || type == T_OBJECT);
1449   LIR_Opr offset = off.result();
1450 
1451   assert (type == T_INT || (!x->is_add() && is_obj) LP64_ONLY( || type == T_LONG ), "unexpected type");
1452   LIR_Address* addr;
1453   if (offset->is_constant()) {
1454 #ifdef _LP64
1455     jlong c = offset->as_jlong();
1456     if ((jlong)((jint)c) == c) {
1457       addr = new LIR_Address(src.result(), (jint)c, type);
1458     } else {
1459       LIR_Opr tmp = new_register(T_LONG);
1460       __ move(offset, tmp);
1461       addr = new LIR_Address(src.result(), tmp, type);
1462     }
1463 #else
1464     addr = new LIR_Address(src.result(), offset->as_jint(), type);
1465 #endif
1466   } else {
1467     addr = new LIR_Address(src.result(), offset, type);
1468   }
1469 
1470   // Because we want a 2-arg form of xchg and xadd
1471   __ move(data, dst);
1472 
1473   if (x->is_add()) {
1474     __ xadd(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
1475   } else {
1476     if (is_obj) {
1477       // Do the pre-write barrier, if any.
1478       pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
1479                   true /* do_load */, false /* patch */, NULL);
1480     }
1481     __ xchg(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
1482     if (is_obj) {
1483       // Seems to be a precise address
1484       post_barrier(LIR_OprFact::address(addr), data);
1485     }
1486   }
1487 }
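
The move(data, dst) before the operation exists because x86 xadd and xchg overwrite their register operand with the old memory value, so LIR models them with dst as both input (new value) and output (previous value). That is exactly the fetch-and-add / exchange contract (analogy, not the generated code):

    #include <atomic>
    #include <cstdio>

    int main() {
      std::atomic<int> counter{10};
      int old  = counter.fetch_add(5);    // xadd: returns 10, counter becomes 15
      int prev = counter.exchange(99);    // xchg: returns 15, counter becomes 99
      printf("old=%d prev=%d now=%d\n", old, prev, (int)counter.load());
      return 0;
    }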

src/cpu/x86/vm/c1_LIRGenerator_x86.cpp (new version, with the Shenandoah barrier changes)

 283     length.load_item();
 284 
 285   }
 286   if (needs_store_check) {
 287     value.load_item();
 288   } else {
 289     value.load_for_store(x->elt_type());
 290   }
 291 
 292   set_no_result(x);
 293 
 294   // the CodeEmitInfo must be duplicated for each different
 295   // LIR-instruction because spilling can occur anywhere between two
 296   // instructions and so the debug information must be different
 297   CodeEmitInfo* range_check_info = state_for(x);
 298   CodeEmitInfo* null_check_info = NULL;
 299   if (x->needs_null_check()) {
 300     null_check_info = new CodeEmitInfo(range_check_info);
 301   }
 302 
 303   LIR_Opr ary = array.result();
 304   ary = shenandoah_write_barrier(ary, null_check_info, x->needs_null_check());
 305   LIR_Opr val = value.result();
 306   if (obj_store && UseShenandoahGC) {
 307     if (! val->is_register()) {
 308       assert(val->is_constant(), "expect constant");
 309     } else {
 310       val = shenandoah_read_barrier(val, NULL, true);
 311     }
 312   }
 313 
 314   // emit array address setup early so it schedules better
 315   LIR_Address* array_addr = emit_array_address(ary, index.result(), x->elt_type(), obj_store);
 316 
 317   if (GenerateRangeChecks && needs_range_check) {
 318     if (use_length) {
 319       __ cmp(lir_cond_belowEqual, length.result(), index.result());
 320       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
 321     } else {
 322       array_range_check(ary, index.result(), null_check_info, range_check_info);
 323       // range_check also does the null check
 324       null_check_info = NULL;
 325     }
 326   }
 327 
 328   if (GenerateArrayStoreCheck && needs_store_check) {
 329     LIR_Opr tmp1 = new_register(objectType);
 330     LIR_Opr tmp2 = new_register(objectType);
 331     LIR_Opr tmp3 = new_register(objectType);
 332 
 333     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
 334     __ store_check(val, ary, tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
 335   }
 336 
 337   if (obj_store) {
 338     // Needs GC write barriers.
 339     pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
 340                 true /* do_load */, false /* patch */, NULL);
 341     __ move(val, array_addr, null_check_info);
 342     // Seems to be a precise address
 343     post_barrier(LIR_OprFact::address(array_addr), value.result());
 344   } else {
 345     __ move(val, array_addr, null_check_info);
 346   }
 347 }
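
The new ary/val handling shows the Shenandoah barrier split: the array being mutated gets shenandoah_write_barrier (the store must land in the to-space copy), while a stored oop only needs shenandoah_read_barrier (resolve to the current copy; constants are exempt). Both resolve through a Brooks-style forwarding pointer; a toy model of that resolve step (illustrative only; the struct and field names are invented):

    #include <cstdio>

    struct Obj {
      Obj* fwd;      // forwarding pointer: points to self until evacuated
      int  payload;
    };

    static Obj* resolve(Obj* o) { return o->fwd; }   // the read-barrier core

    int main() {
      Obj to_space{nullptr, 7};
      to_space.fwd = &to_space;        // current copy forwards to itself
      Obj from_space{&to_space, 7};    // evacuated: fwd points at the new copy
      printf("%d\n", resolve(&from_space)->payload);  // reads the current copy
      return 0;
    }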
 348 
 349 
 350 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 351   assert(x->is_pinned(),"");
 352   LIRItem obj(x->obj(), this);
 353   obj.load_item();
 354 
 355   set_no_result(x);
 356 
 357   // "lock" stores the address of the monitor stack slot, so this is not an oop
 358   LIR_Opr lock = new_register(T_INT);
 359   // Need a scratch register for biased locking on x86
 360   LIR_Opr scratch = LIR_OprFact::illegalOpr;
 361   if (UseBiasedLocking) {
 362     scratch = new_register(T_INT);
 363   }
 364 
 365   CodeEmitInfo* info_for_exception = NULL;
 366   if (x->needs_null_check()) {
 367     info_for_exception = state_for(x);
 368   }
 369   // this CodeEmitInfo must not have the xhandlers because here the
 370   // object is already locked (xhandlers expect object to be unlocked)
 371   CodeEmitInfo* info = state_for(x, x->state(), true);
 372   LIR_Opr obj_opr = obj.result();
 373   obj_opr = shenandoah_write_barrier(obj_opr, state_for(x), x->needs_null_check());
 374   monitor_enter(obj_opr, lock, syncTempOpr(), scratch,
 375                         x->monitor_no(), info_for_exception, info);
 376 }
 377 
 378 
 379 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
 380   assert(x->is_pinned(),"");
 381 
 382   LIRItem obj(x->obj(), this);
 383   obj.dont_load_item();
 384 
 385   LIR_Opr lock = new_register(T_INT);
 386   LIR_Opr obj_temp = new_register(T_INT);
 387   set_no_result(x);
 388   monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
 389 }
 390 
 391 
 392 // _ineg, _lneg, _fneg, _dneg
 393 void LIRGenerator::do_NegateOp(NegateOp* x) {
 394   LIRItem value(x->x(), this);
 746 
 747   // get address of field
 748   obj.load_item();
 749   offset.load_nonconstant();
 750 
 751   if (type == objectType) {
 752     cmp.load_item_force(FrameMap::rax_oop_opr);
 753     val.load_item();
 754   } else if (type == intType) {
 755     cmp.load_item_force(FrameMap::rax_opr);
 756     val.load_item();
 757   } else if (type == longType) {
 758     cmp.load_item_force(FrameMap::long0_opr);
 759     val.load_item_force(FrameMap::long1_opr);
 760   } else {
 761     ShouldNotReachHere();
 762   }
 763 
 764   LIR_Opr addr = new_pointer_register();
 765   LIR_Address* a;
 766 
 767   LIR_Opr obj_op = obj.result();
 768   obj_op = shenandoah_write_barrier(obj_op, NULL, false);
 769 
 770   if (offset.result()->is_constant()) {
 771 #ifdef _LP64
 772     jlong c = offset.result()->as_jlong();
 773     if ((jlong)((jint)c) == c) {
 774       a = new LIR_Address(obj_op,
 775                           (jint)c,
 776                           as_BasicType(type));
 777     } else {
 778       LIR_Opr tmp = new_register(T_LONG);
 779       __ move(offset.result(), tmp);
 780       a = new LIR_Address(obj_op,
 781                           tmp,
 782                           as_BasicType(type));
 783     }
 784 #else
 785     a = new LIR_Address(obj_op,
 786                         offset.result()->as_jint(),
 787                         as_BasicType(type));
 788 #endif
 789   } else {
 790     a = new LIR_Address(obj_op,
 791                         offset.result(),
 792                         LIR_Address::times_1,
 793                         0,
 794                         as_BasicType(type));
 795   }
 796   __ leal(LIR_OprFact::address(a), addr);
 797 
 798   if (type == objectType) {  // Write-barrier needed for Object fields.
 799     // Do the pre-write barrier, if any.
 800     pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
 801                 true /* do_load */, false /* patch */, NULL);
 802   }
 803 
 804   LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
 805 
 806   LIR_Opr val_op = val.result();
 807 
 808   if (type == objectType) {
 809     val_op = shenandoah_read_barrier(val_op, NULL, true);
 810     __ cas_obj(addr, cmp.result(), val_op, new_register(T_OBJECT), new_register(T_OBJECT));
 811   }
 812   else if (type == intType)
 813     __ cas_int(addr, cmp.result(), val_op, ill, ill);
 814   else if (type == longType)
 815     __ cas_long(addr, cmp.result(), val_op, ill, ill);
 816   else {
 817     ShouldNotReachHere();
 818   }
 819 
 820   // generate conditional move of boolean result
 821   LIR_Opr result = rlock_result(x);
 822   __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
 823            result, as_BasicType(type));
 824   if (type == objectType) {   // Write-barrier needed for Object fields.
 825     // Seems to be a precise address
 826     post_barrier(addr, val_op);
 827   }
 828 }
 829 
 830 
 831 void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
 832   assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow), "wrong type");
 833   LIRItem value(x->argument_at(0), this);
 834 
 835   bool use_fpu = false;
 836   if (UseSSE >= 2) {
 837     switch(x->id()) {
 838       case vmIntrinsics::_dsin:
 839       case vmIntrinsics::_dcos:
 840       case vmIntrinsics::_dtan:
 841       case vmIntrinsics::_dlog:
 842       case vmIntrinsics::_dlog10:
 843       case vmIntrinsics::_dexp:
 844       case vmIntrinsics::_dpow:
 845         use_fpu = true;
 846     }
 898   }
 899 
 900   if (use_fpu) {
 901     __ move(calc_result, x->operand());
 902   }
 903 }
 904 
 905 
 906 void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
 907   assert(x->number_of_arguments() == 5, "wrong type");
 908 
 909   // Make all state_for calls early since they can emit code
 910   CodeEmitInfo* info = state_for(x, x->state());
 911 
 912   LIRItem src(x->argument_at(0), this);
 913   LIRItem src_pos(x->argument_at(1), this);
 914   LIRItem dst(x->argument_at(2), this);
 915   LIRItem dst_pos(x->argument_at(3), this);
 916   LIRItem length(x->argument_at(4), this);
 917 
 918   LIR_Opr dst_op = dst.result();
 919   dst_op = shenandoah_write_barrier(dst_op, info, x->arg_needs_null_check(2));
 920   LIR_Opr src_op = src.result();
 921   src_op = shenandoah_read_barrier(src_op, info, x->arg_needs_null_check(0));
 922 
 923   // operands for arraycopy must use fixed registers, otherwise
 924   // LinearScan will fail allocation (because arraycopy always needs a
 925   // call)
 926 
 927 #ifndef _LP64
 928   src_op = force_opr_to(src_op, FrameMap::rcx_oop_opr);
 929   src_pos.load_item_force (FrameMap::rdx_opr);
 930   dst_op = force_opr_to(dst_op, FrameMap::rax_oop_opr);
 931   dst_pos.load_item_force (FrameMap::rbx_opr);
 932   length.load_item_force  (FrameMap::rdi_opr);
 933   LIR_Opr tmp =           (FrameMap::rsi_opr);
 934 #else
 935 
 936   // The java calling convention will give us enough registers
 937   // so that on the stub side the args will be perfect already.
 938   // On the other slow/special case side we call C and the arg
 939   // positions are not similar enough to pick one as the best.
 940   // Also because the java calling convention is a "shifted" version
 941   // of the C convention we can process the java args trivially into C
 942   // args without worry of overwriting during the xfer
 943 
 944   src_op = force_opr_to(src_op, FrameMap::as_oop_opr(j_rarg0));
 945   src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
 946   dst_op = force_opr_to(dst_op, FrameMap::as_oop_opr(j_rarg2));
 947   dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
 948   length.load_item_force  (FrameMap::as_opr(j_rarg4));
 949 
 950   LIR_Opr tmp =           FrameMap::as_opr(j_rarg5);
 951 #endif // LP64
 952 
 953   set_no_result(x);
 954 
 955   int flags;
 956   ciArrayKlass* expected_type;
 957   arraycopy_helper(x, &flags, &expected_type);
 958 
 959   __ arraycopy(src_op, src_pos.result(), dst_op, dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
 960 }
 961 
 962 void LIRGenerator::do_update_CRC32(Intrinsic* x) {
 963   assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
 964   // Make all state_for calls early since they can emit code
 965   LIR_Opr result = rlock_result(x);
 966   int flags = 0;
 967   switch (x->id()) {
 968     case vmIntrinsics::_updateCRC32: {
 969       LIRItem crc(x->argument_at(0), this);
 970       LIRItem val(x->argument_at(1), this);
 971       // val is destroyed by update_crc32
 972       val.set_destroys_register();
 973       crc.load_item();
 974       val.load_item();
 975       __ update_crc32(crc.result(), val.result(), result);
 976       break;
 977     }
 978     case vmIntrinsics::_updateBytesCRC32:
 979     case vmIntrinsics::_updateByteBufferCRC32: {
 990       int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
 991       if (off.result()->is_constant()) {
 992         index = LIR_OprFact::illegalOpr;
 993         offset += off.result()->as_jint();
 994       }
 995       LIR_Opr base_op = buf.result();
 996 
 997 #ifndef _LP64
 998       if (!is_updateBytes) { // long buf holds a raw address
 999          base_op = new_register(T_INT);
1000          __ convert(Bytecodes::_l2i, buf.result(), base_op);
1001       }
1002 #else
1003       if (index->is_valid()) {
1004         LIR_Opr tmp = new_register(T_LONG);
1005         __ convert(Bytecodes::_i2l, index, tmp);
1006         index = tmp;
1007       }
1008 #endif
1009 
1010       if (is_updateBytes) {
1011         base_op = shenandoah_read_barrier(base_op, NULL, false);
1012       }
1013 
1014       LIR_Address* a = new LIR_Address(base_op,
1015                                        index,
1016                                        LIR_Address::times_1,
1017                                        offset,
1018                                        T_BYTE);
1019       BasicTypeList signature(3);
1020       signature.append(T_INT);
1021       signature.append(T_ADDRESS);
1022       signature.append(T_INT);
1023       CallingConvention* cc = frame_map()->c_calling_convention(&signature);
1024       const LIR_Opr result_reg = result_register_for(x->type());
1025 
1026       LIR_Opr addr = new_pointer_register();
1027       __ leal(LIR_OprFact::address(a), addr);
1028 
1029       crc.load_item_force(cc->at(0));
1030       __ move(addr, cc->at(1));
1031       len.load_item_force(cc->at(2));
1032 
1033       __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
1326   if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
1327     // inline long zero
1328     yin->dont_load_item();
1329   } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
1330     // longs cannot handle constants at right side
1331     yin->load_item();
1332   } else {
1333     yin->dont_load_item();
1334   }
1335 
1336   // add safepoint before generating condition code so it can be recomputed
1337   if (x->is_safepoint()) {
1338     // increment backedge counter if needed
1339     increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
1340     __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
1341   }
1342   set_no_result(x);
1343 
1344   LIR_Opr left = xin->result();
1345   LIR_Opr right = yin->result();
1346   if (tag == objectTag && UseShenandoahGC && x->y()->type() != objectNull) { // Don't need to resolve for ifnull.
1347     left = shenandoah_write_barrier(left, NULL, true);
1348     right = shenandoah_read_barrier(right, NULL, true);
1349   }
1350   __ cmp(lir_cond(cond), left, right);
1351   // Generate branch profiling. Profiling code doesn't kill flags.
1352   profile_branch(x, cond);
1353   move_to_phi(x->state());
1354   if (x->x()->type()->is_float_kind()) {
1355     __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
1356   } else {
1357     __ branch(lir_cond(cond), right->type(), x->tsux());
1358   }
1359   assert(x->default_sux() == x->fsux(), "wrong destination above");
1360   __ jump(x->default_sux());
1361 }
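
Resolving both operands before the compare matters for reference equality: while evacuation is in progress the same logical object can be reachable at two raw addresses, and acmp must not tell them apart; comparisons against null are exempt since null is never forwarded. Continuing the invented Obj model from the earlier sketch:

    #include <cstdio>

    struct Obj { Obj* fwd; };

    int main() {
      Obj to{nullptr};  to.fwd = &to;
      Obj from{&to};                   // from-space copy of the same object
      Obj *a = &from, *b = &to;
      // raw pointers differ, resolved pointers agree
      printf("raw: %d  resolved: %d\n", a == b, a->fwd == b->fwd);   // raw: 0  resolved: 1
      return 0;
    }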
1362 
1363 
1364 LIR_Opr LIRGenerator::getThreadPointer() {
1365 #ifdef _LP64
1366   return FrameMap::as_pointer_opr(r15_thread);
1367 #else
1368   LIR_Opr result = new_register(T_INT);
1369   __ get_thread(result);
1409                               address->index(), address->scale(),
1410                               address->disp(), T_DOUBLE);
1411     // Transfer the value atomically by using FP moves.  This means
1412     // the value has to be moved between CPU and FPU registers.  In
1413     // SSE0 and SSE1 mode it has to be moved through spill slot but in
1414     // SSE2+ mode it can be moved directly.
1415     LIR_Opr temp_double = new_register(T_DOUBLE);
1416     __ volatile_move(LIR_OprFact::address(address), temp_double, T_LONG, info);
1417     __ volatile_move(temp_double, result, T_LONG);
1418     if (UseSSE < 2) {
1419       // below SSE2 there is no xmm->cpu register move, so the value must go through a spill slot
1420       set_vreg_flag(result, must_start_in_memory);
1421     }
1422   } else {
1423     __ load(address, result, info);
1424   }
1425 }
1426 
1427 void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
1428                                      BasicType type, bool is_volatile) {
1429   src = shenandoah_read_barrier(src, NULL, false);
1430   if (is_volatile && type == T_LONG) {
1431     LIR_Address* addr = new LIR_Address(src, offset, T_DOUBLE);
1432     LIR_Opr tmp = new_register(T_DOUBLE);
1433     __ load(addr, tmp);
1434     LIR_Opr spill = new_register(T_LONG);
1435     set_vreg_flag(spill, must_start_in_memory);
1436     __ move(tmp, spill);
1437     __ move(spill, dst);
1438   } else {
1439     LIR_Address* addr = new LIR_Address(src, offset, type);
1440     __ load(addr, dst);
1441   }
1442 }
1443 
1444 
1445 void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
1446                                      BasicType type, bool is_volatile) {
1447   src = shenandoah_write_barrier(src, NULL, false);
1448   if (is_volatile && type == T_LONG) {
1449     LIR_Address* addr = new LIR_Address(src, offset, T_DOUBLE);
1450     LIR_Opr tmp = new_register(T_DOUBLE);
1451     LIR_Opr spill = new_register(T_DOUBLE);
1452     set_vreg_flag(spill, must_start_in_memory);
1453     __ move(data, spill);
1454     __ move(spill, tmp);
1455     __ move(tmp, addr);
1456   } else {
1457     LIR_Address* addr = new LIR_Address(src, offset, type);
1458     bool is_obj = (type == T_ARRAY || type == T_OBJECT);
1459     if (is_obj) {
1460       // Do the pre-write barrier, if any.
1461       pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
1462                   true /* do_load */, false /* patch */, NULL);
1463       data = shenandoah_read_barrier(data, NULL, true);
1464       __ move(data, addr);
1465       assert(src->is_register(), "must be register");
1466       // Seems to be a precise address
1467       post_barrier(LIR_OprFact::address(addr), data);
1468     } else {
1469       __ move(data, addr);
1470     }
1471   }
1472 }
1473 
1474 void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
1475   BasicType type = x->basic_type();
1476   LIRItem src(x->object(), this);
1477   LIRItem off(x->offset(), this);
1478   LIRItem value(x->value(), this);
1479 
1480   src.load_item();
1481   value.load_item();
1482   off.load_nonconstant();
1483 
1484   LIR_Opr dst = rlock_result(x, type);
1485   LIR_Opr data = value.result();
1486   bool is_obj = (type == T_ARRAY || type == T_OBJECT);
1487   LIR_Opr offset = off.result();
1488 
1489   assert (type == T_INT || (!x->is_add() && is_obj) LP64_ONLY( || type == T_LONG ), "unexpected type");
1490 
1491   LIR_Opr src_op = src.result();
1492   src_op = shenandoah_write_barrier(src_op, NULL, false);
1493   if (is_obj) {
1494     data = shenandoah_read_barrier(data, NULL, true);
1495   }
1496 
1497   LIR_Address* addr;
1498   if (offset->is_constant()) {
1499 #ifdef _LP64
1500     jlong c = offset->as_jlong();
1501     if ((jlong)((jint)c) == c) {
1502       addr = new LIR_Address(src_op, (jint)c, type);
1503     } else {
1504       LIR_Opr tmp = new_register(T_LONG);
1505       __ move(offset, tmp);
1506       addr = new LIR_Address(src_op, tmp, type);
1507     }
1508 #else
1509     addr = new LIR_Address(src_op, offset->as_jint(), type);
1510 #endif
1511   } else {
1512     addr = new LIR_Address(src_op, offset, type);
1513   }
1514 
1515   // Because we want a 2-arg form of xchg and xadd
1516   __ move(data, dst);
1517 
1518   if (x->is_add()) {
1519     __ xadd(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
1520   } else {
1521     if (is_obj) {
1522       // Do the pre-write barrier, if any.
1523       pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
1524                   true /* do_load */, false /* patch */, NULL);
1525     }
1526     __ xchg(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
1527     if (is_obj) {
1528       // Seems to be a precise address
1529       post_barrier(LIR_OprFact::address(addr), data);
1530     }
1531   }
1532 }