326 LIRItem obj(x->obj(), this);
327 obj.load_item();
328
329 set_no_result(x);
330
331 // "lock" stores the address of the monitor stack slot, so this is not an oop
332 LIR_Opr lock = new_register(T_INT);
333 // Need a scratch register for biased locking
334 LIR_Opr scratch = LIR_OprFact::illegalOpr;
335 if (UseBiasedLocking) {
336 scratch = new_register(T_INT);
337 }
338
339 CodeEmitInfo* info_for_exception = NULL;
340 if (x->needs_null_check()) {
341 info_for_exception = state_for(x);
342 }
343 // this CodeEmitInfo must not have the xhandlers because here the
344 // object is already locked (xhandlers expect object to be unlocked)
345 CodeEmitInfo* info = state_for(x, x->state(), true);
346 LIR_Opr obj_opr = obj.result();
347 DecoratorSet decorators = IN_HEAP;
348 if (!x->needs_null_check()) {
349 decorators |= IS_NOT_NULL;
350 }
351 obj_opr = access_resolve_for_write(decorators, obj_opr, state_for(x));
352 monitor_enter(obj_opr, lock, syncTempOpr(), scratch,
353 x->monitor_no(), info_for_exception, info);
354 }
355
356
357 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
358 assert(x->is_pinned(),"");
359
360 LIRItem obj(x->obj(), this);
361 obj.dont_load_item();
362
363 LIR_Opr lock = new_register(T_INT);
364 LIR_Opr obj_temp = new_register(T_INT);
365 set_no_result(x);
366 monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
367 }
368
369
370 void LIRGenerator::do_NegateOp(NegateOp* x) {
371
372 LIRItem from(x->x(), this);
861 }
862 break;
863 default: ShouldNotReachHere();
864 }
865 __ move(result_reg, calc_result);
866 }
867
868
// Lower the System.arraycopy intrinsic (5 arguments: src, srcPos, dst,
// dstPos, length). Resolves the source/destination oops through the
// access API, forces all operands into the Java argument registers, and
// emits an LIR_OpArrayCopy. Statement order matters throughout: see the
// comments below.
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // Resolve the two oop operands for access. IS_NOT_NULL is added only
  // when the corresponding argument has already been proven non-null
  // (arg_needs_null_check returns false), letting the resolve skip its
  // null handling. NOTE(review): access_resolve_for_write/read are
  // presumably GC-barrier hooks (e.g. Shenandoah) — confirm against the
  // barrier set interface.
  LIR_Opr dst_op = dst.result();
  LIR_Opr src_op = src.result();
  DecoratorSet decorators = IN_HEAP;
  if (!x->arg_needs_null_check(2)) {
    decorators |= IS_NOT_NULL;
  }
  dst_op = access_resolve_for_write(decorators, dst_op, info);
  decorators = IN_HEAP;
  if (!x->arg_needs_null_check(0)) {
    decorators |= IS_NOT_NULL;
  }
  src_op = access_resolve_for_read(decorators, src_op, info);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer

  // Pin each operand to its Java argument register (j_rarg0..j_rarg4);
  // the resolved oops go through force_opr_to, the rest through
  // load_item_force.
  src_op = force_opr_to(src_op, FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst_op = force_opr_to(dst_op, FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force (FrameMap::as_opr(j_rarg4));

  // Scratch register for the arraycopy LIR op.
  LIR_Opr tmp = FrameMap::as_opr(j_rarg5);

  // arraycopy itself produces no value.
  set_no_result(x);

  // Let the shared helper classify the copy (flags and, when statically
  // known, the expected array klass).
  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src_op, src_pos.result(), dst_op, dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}
922
923 void LIRGenerator::do_update_CRC32(Intrinsic* x) {
924 assert(UseCRC32Intrinsics, "why are we here?");
925 // Make all state_for calls early since they can emit code
926 LIR_Opr result = rlock_result(x);
927 int flags = 0;
928 switch (x->id()) {
929 case vmIntrinsics::_updateCRC32: {
930 LIRItem crc(x->argument_at(0), this);
931 LIRItem val(x->argument_at(1), this);
932 // val is destroyed by update_crc32
933 val.set_destroys_register();
934 crc.load_item();
935 val.load_item();
936 __ update_crc32(crc.result(), val.result(), result);
937 break;
938 }
939 case vmIntrinsics::_updateBytesCRC32:
940 case vmIntrinsics::_updateByteBufferCRC32: {
|
326 LIRItem obj(x->obj(), this);
327 obj.load_item();
328
329 set_no_result(x);
330
331 // "lock" stores the address of the monitor stack slot, so this is not an oop
332 LIR_Opr lock = new_register(T_INT);
333 // Need a scratch register for biased locking
334 LIR_Opr scratch = LIR_OprFact::illegalOpr;
335 if (UseBiasedLocking) {
336 scratch = new_register(T_INT);
337 }
338
339 CodeEmitInfo* info_for_exception = NULL;
340 if (x->needs_null_check()) {
341 info_for_exception = state_for(x);
342 }
343 // this CodeEmitInfo must not have the xhandlers because here the
344 // object is already locked (xhandlers expect object to be unlocked)
345 CodeEmitInfo* info = state_for(x, x->state(), true);
346 monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
347 x->monitor_no(), info_for_exception, info);
348 }
349
350
351 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
352 assert(x->is_pinned(),"");
353
354 LIRItem obj(x->obj(), this);
355 obj.dont_load_item();
356
357 LIR_Opr lock = new_register(T_INT);
358 LIR_Opr obj_temp = new_register(T_INT);
359 set_no_result(x);
360 monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
361 }
362
363
364 void LIRGenerator::do_NegateOp(NegateOp* x) {
365
366 LIRItem from(x->x(), this);
855 }
856 break;
857 default: ShouldNotReachHere();
858 }
859 __ move(result_reg, calc_result);
860 }
861
862
// Lower the System.arraycopy intrinsic (5 arguments: src, srcPos, dst,
// dstPos, length) by forcing every operand into its Java argument
// register and emitting an LIR_OpArrayCopy. The forcing order below is
// deliberate — see the calling-convention comment.
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer

  // Pin each operand to its Java argument register (j_rarg0..j_rarg4);
  // the two oops use the oop flavor of the FrameMap operand.
  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  // Scratch register for the arraycopy LIR op.
  LIR_Opr tmp =           FrameMap::as_opr(j_rarg5);

  // arraycopy itself produces no value.
  set_no_result(x);

  // Let the shared helper classify the copy (flags and, when statically
  // known, the expected array klass).
  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}
903
904 void LIRGenerator::do_update_CRC32(Intrinsic* x) {
905 assert(UseCRC32Intrinsics, "why are we here?");
906 // Make all state_for calls early since they can emit code
907 LIR_Opr result = rlock_result(x);
908 int flags = 0;
909 switch (x->id()) {
910 case vmIntrinsics::_updateCRC32: {
911 LIRItem crc(x->argument_at(0), this);
912 LIRItem val(x->argument_at(1), this);
913 // val is destroyed by update_crc32
914 val.set_destroys_register();
915 crc.load_item();
916 val.load_item();
917 __ update_crc32(crc.result(), val.result(), result);
918 break;
919 }
920 case vmIntrinsics::_updateBytesCRC32:
921 case vmIntrinsics::_updateByteBufferCRC32: {
|