  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for biased locking on x86
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_register(T_INT);
  }
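  // The scratch register is passed through monitor_enter to the biased
  // locking fast path, which uses it as a temporary while computing the
  // biased mark word.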

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  LIR_Opr obj_opr = obj.result();
  DecoratorSet decorators = IN_HEAP;
  if (!x->needs_null_check()) {
    decorators |= IS_NOT_NULL;
  }
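  // Resolve the object through the barrier set before locking, so that a
  // GC which may keep multiple copies of an object (e.g. Shenandoah)
  // takes the lock on the canonical copy.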
  obj_opr = access_resolve_for_write(decorators, obj_opr, state_for(x));
  monitor_enter(obj_opr, lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(), "");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
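  // obj was deliberately not loaded: obj_temp is only a temporary, and
  // when the unlock path actually needs the object (e.g. for biased
  // locking) it is reloaded from the BasicObjectLock slot in
  // LIR_Assembler::emit_lock.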
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}


// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem value(x->x(), this);
      }
      break;
    default: ShouldNotReachHere();
  }
#endif // _LP64
  __ move(result_reg, calc_result);
}

void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  dst.load_item();
  LIR_Opr dst_op = dst.result();
  DecoratorSet decorators = IN_HEAP;
  if (!x->arg_needs_null_check(2)) {
    decorators |= IS_NOT_NULL;
  }
  dst_op = access_resolve_for_write(decorators, dst_op, info);
  src.load_item();
  LIR_Opr src_op = src.result();
  decorators = IN_HEAP;
  if (!x->arg_needs_null_check(0)) {
    decorators |= IS_NOT_NULL;
  }
  src_op = access_resolve_for_read(decorators, src_op, info);
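  // For a collector such as Shenandoah, the resolves above make the copy
  // read from and write to the up-to-date copies of the two arrays.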

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

#ifndef _LP64
  src_op = force_opr_to(src_op, FrameMap::rcx_oop_opr);
  src_pos.load_item_force (FrameMap::rdx_opr);
  dst_op = force_opr_to(dst_op, FrameMap::rax_oop_opr);
  dst_pos.load_item_force (FrameMap::rbx_opr);
  length.load_item_force (FrameMap::rdi_opr);
  LIR_Opr tmp = FrameMap::rsi_opr;
#else

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer.
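  // (On x86_64, j_rargN is defined as c_rarg(N+1), with j_rarg5 wrapping
  // around to c_rarg0, which is what makes that shift trivial.)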

  src_op = force_opr_to(src_op, FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst_op = force_opr_to(dst_op, FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp = FrameMap::as_opr(j_rarg5);
#endif // _LP64

  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src_op, src_pos.result(), dst_op, dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}

void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // val is destroyed by update_crc32
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
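      // The single-byte update is table-driven: essentially
      // crc = table[(crc ^ val) & 0xff] ^ (crc >>> 8), with crc
      // complemented before and after the update.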
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
  LIR_Opr result = rlock_result(x);

  LIRItem a(x->argument_at(0), this);                   // Object
  LIRItem aOffset(x->argument_at(1), this);             // long
  LIRItem b(x->argument_at(2), this);                   // Object
  LIRItem bOffset(x->argument_at(3), this);             // long
  LIRItem length(x->argument_at(4), this);              // int
  LIRItem log2ArrayIndexScale(x->argument_at(5), this); // int

  a.load_item();
  aOffset.load_nonconstant();
  b.load_item();
  bOffset.load_nonconstant();

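  // If an offset is a compile-time constant, fold it into the address
  // displacement and drop the index operand.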
  long constant_aOffset = 0;
  LIR_Opr result_aOffset = aOffset.result();
  if (result_aOffset->is_constant()) {
    constant_aOffset = result_aOffset->as_jlong();
    result_aOffset = LIR_OprFact::illegalOpr;
  }
  LIR_Opr result_a = a.result();
  result_a = access_resolve_for_read(IN_HEAP, result_a, NULL);

  long constant_bOffset = 0;
  LIR_Opr result_bOffset = bOffset.result();
  if (result_bOffset->is_constant()) {
    constant_bOffset = result_bOffset->as_jlong();
    result_bOffset = LIR_OprFact::illegalOpr;
  }
  LIR_Opr result_b = b.result();
  result_b = access_resolve_for_read(IN_HEAP, result_b, NULL);

#ifndef _LP64
  result_a = new_register(T_INT);
  __ convert(Bytecodes::_l2i, a.result(), result_a);
  result_b = new_register(T_INT);
  __ convert(Bytecodes::_l2i, b.result(), result_b);
#endif

  LIR_Address* addr_a = new LIR_Address(result_a,
                                        result_aOffset,
                                        constant_aOffset,
                                        T_BYTE);

  LIR_Address* addr_b = new LIR_Address(result_b,
                                        result_bOffset,
                                        constant_bOffset,
                                        T_BYTE);

  BasicTypeList signature(4);
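  // The four-slot signature describes the call into the vectorized
  // mismatch stub: the two resolved byte addresses, the element count,
  // and the log2 element scale.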