122 }
123 }
124
125 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
126 if (c->type() == T_INT) {
127 return Immediate::is_simm20(c->as_jint());
128 } else if (c->type() == T_LONG) {
129 return Immediate::is_simm20(c->as_jlong());
130 }
131 return false;
132 }
133
134 LIR_Opr LIRGenerator::safepoint_poll_register() {
135 return new_register(longType);
136 }
137
138 LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
139 int shift, int disp, BasicType type) {
140 assert(base->is_register(), "must be");
141 if (index->is_constant()) {
142 intptr_t large_disp = ((intx)(index->as_constant_ptr()->as_jint()) << shift) + disp;
143 if (Displacement::is_validDisp(large_disp)) {
144 return new LIR_Address(base, large_disp, type);
145 }
146 // Index is illegal so replace it with the displacement loaded into a register.
147 index = new_pointer_register();
148 __ move(LIR_OprFact::intptrConst(large_disp), index);
149 return new LIR_Address(base, index, type);
150 } else {
151 if (shift > 0) {
152 LIR_Opr tmp = new_pointer_register();
153 __ shift_left(index, shift, tmp);
154 index = tmp;
155 }
156 return new LIR_Address(base, index, disp, type);
157 }
158 }
159
// Compute a LIR_Address for an array element access:
//   array base + header offset + index * element size.
// When a precise card mark is needed, the full address is computed up front
// into a register so it can be reused for both the store and the mark.
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type, bool needs_card_mark) {
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    // Constant index: fold it into the displacement.
    // NOTE(review): this assumes the constant index is an int; a T_LONG
    // constant would be misread here — confirm against callers.
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
  } else {
    if (index_opr->type() == T_INT) {
      // Widen the 32-bit index for 64-bit address arithmetic.
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
    if (shift > 0) {
      // Scale the index by the element size (shifts the operand in place).
      __ shift_left(index_opr, shift, index_opr);
    }
    addr = new LIR_Address(array_opr,
                           index_opr,
                           offset_in_bytes, type);
  }
  if (needs_card_mark) {
    // This store will need a precise card mark, so go ahead and
    // compute the full address instead of computing once for the
    // store and again for the card mark.
    LIR_Opr tmp = new_pointer_register();
    __ leal(LIR_OprFact::address(addr), tmp);
    return new LIR_Address(tmp, type);
  } else {
    return addr;
  }
}
194
195 LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
196 LIR_Opr r = LIR_OprFact::illegalOpr;
197 if (type == T_LONG) {
198 r = LIR_OprFact::longConst(x);
199 } else if (type == T_INT) {
200 r = LIR_OprFact::intConst(x);
201 } else {
202 ShouldNotReachHere();
203 }
204 return r;
205 }
206
207 void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
208 LIR_Opr pointer = new_pointer_register();
209 __ move(LIR_OprFact::intptrConst(counter), pointer);
210 LIR_Address* addr = new LIR_Address(pointer, type);
211 increment_counter(addr, step);
|
122 }
123 }
124
// Checks whether an integral LIR constant fits a signed 20-bit immediate
// field and can therefore be inlined into an instruction. Non-integral
// constants are never inlined.
bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  if (c->type() == T_INT) {
    return Immediate::is_simm20(c->as_jint());
  } else if (c->type() == T_LONG) {
    return Immediate::is_simm20(c->as_jlong());
  }
  return false;
}
133
// Provide a fresh register for the safepoint poll sequence.
LIR_Opr LIRGenerator::safepoint_poll_register() {
  return new_register(longType);
}
137
// Build an addressing expression base + (index << shift) + disp.
// A constant index (int or long) is folded into the displacement; if the
// folded value does not fit the machine's displacement field it is
// materialized in a register instead.
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  if (index->is_constant()) {
    intx large_disp = disp;
    // Dispatch on the constant's type: as_jint() would misread a long.
    LIR_Const *constant = index->as_constant_ptr();
    if (constant->type() == T_LONG) {
      large_disp += constant->as_jlong() << shift;
    } else {
      large_disp += (intx)(constant->as_jint()) << shift;
    }
    if (Displacement::is_validDisp(large_disp)) {
      return new LIR_Address(base, large_disp, type);
    }
    // Index is illegal so replace it with the displacement loaded into a register.
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(large_disp), index);
    return new LIR_Address(base, index, type);
  } else {
    if (shift > 0) {
      // Scale a register index into a fresh temp so the caller's operand
      // is not clobbered.
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index, shift, tmp);
      index = tmp;
    }
    return new LIR_Address(base, index, disp, type);
  }
}
165
// Compute a LIR_Address for an array element access:
//   array base + header offset + index * element size.
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    // Constant index: fold it into the displacement.
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
  } else {
    if (index_opr->type() == T_INT) {
      // Widen the 32-bit index for 64-bit address arithmetic.
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
    if (shift > 0) {
      // Scale the index by the element size (shifts the operand in place).
      __ shift_left(index_opr, shift, index_opr);
    }
    addr = new LIR_Address(array_opr,
                           index_opr,
                           offset_in_bytes, type);
  }
  return addr;
}
191
// Wrap the immediate 'x' in a LIR constant operand of the requested basic
// type. Only T_INT and T_LONG are supported.
LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r = LIR_OprFact::illegalOpr;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
  } else {
    ShouldNotReachHere();
  }
  return r;
}
203
204 void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
205 LIR_Opr pointer = new_pointer_register();
206 __ move(LIR_OprFact::intptrConst(counter), pointer);
207 LIR_Address* addr = new LIR_Address(pointer, type);
208 increment_counter(addr, step);
|
234 return true;
235 } else if (is_power_of_2(c - 1)) {
236 __ move(left, tmp);
237 __ shift_left(left, log2_intptr(c - 1), left);
238 __ add(left, tmp, result);
239 return true;
240 }
241 }
242 return false;
243 }
244
245 void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
246 BasicType type = item->type();
247 __ store(item, new LIR_Address(FrameMap::Z_SP_opr, in_bytes(offset_from_sp), type));
248 }
249
250 //----------------------------------------------------------------------
251 // visitor functions
252 //----------------------------------------------------------------------
253
// Emit LIR for an array element store (iastore/aastore/...): operand setup,
// range check, dynamic store check for object arrays, GC write barriers,
// and the store itself.
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(),"");
  bool needs_range_check = x->compute_needs_range_check();
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  // A store check is needed for object stores unless the stored value is a
  // compile-time null constant (and no type profiling is requested).
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object() ||
                                         x->should_profile());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  // Keep a constant index only if it fits a signed 20-bit displacement.
  index.load_nonconstant(20);

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();
  }
  if (needs_store_check || x->check_boolean()) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // The CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different.
  CodeEmitInfo* range_check_info = state_for (x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  // Emit array address setup early so it schedules better.
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
  if (value.result()->is_constant() && array_addr->index()->is_valid()) {
    // Constants cannot be stored with index register on ZARCH_64 (see LIR_Assembler::const2mem()).
    LIR_Opr tmp = new_pointer_register();
    __ leal(LIR_OprFact::address(array_addr), tmp);
    array_addr = new LIR_Address(tmp, x->elt_type());
  }

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      // length <= index (unsigned) means out of bounds.
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // Range_check also does the null check.
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    LIR_Opr tmp1 = new_register(objectType);
    LIR_Opr tmp2 = new_register(objectType);
    LIR_Opr tmp3 = LIR_OprFact::illegalOpr;

    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
  }

  if (obj_store) {
    // Needs GC write barriers.
    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
  }

  // Possibly normalize a boolean value to 0/1 before storing.
  LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
  __ move(result, array_addr, null_check_info);

  if (obj_store) {
    // Precise card mark
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  }
}
335
336 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
337 assert(x->is_pinned(),"");
338 LIRItem obj(x->obj(), this);
339 obj.load_item();
340
341 set_no_result(x);
342
343 // "lock" stores the address of the monitor stack slot, so this is not an oop.
344 LIR_Opr lock = new_register(T_INT);
345
346 CodeEmitInfo* info_for_exception = NULL;
347 if (x->needs_null_check()) {
348 info_for_exception = state_for (x);
349 }
350 // This CodeEmitInfo must not have the xhandlers because here the
351 // object is already locked (xhandlers expect object to be unlocked).
352 CodeEmitInfo* info = state_for (x, x->state(), true);
|
231 return true;
232 } else if (is_power_of_2(c - 1)) {
233 __ move(left, tmp);
234 __ shift_left(left, log2_intptr(c - 1), left);
235 __ add(left, tmp, result);
236 return true;
237 }
238 }
239 return false;
240 }
241
// Store an outgoing call argument into its stack slot at SP + offset_from_sp.
void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::Z_SP_opr, in_bytes(offset_from_sp), type));
}
246
247 //----------------------------------------------------------------------
248 // visitor functions
249 //----------------------------------------------------------------------
250
251 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int pro
252 LIR_Opr tmp1 = new_register(objectType);
253 LIR_Opr tmp2 = new_register(objectType);
254 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
255 __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
256 }
257
258 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
259 assert(x->is_pinned(),"");
260 LIRItem obj(x->obj(), this);
261 obj.load_item();
262
263 set_no_result(x);
264
265 // "lock" stores the address of the monitor stack slot, so this is not an oop.
266 LIR_Opr lock = new_register(T_INT);
267
268 CodeEmitInfo* info_for_exception = NULL;
269 if (x->needs_null_check()) {
270 info_for_exception = state_for (x);
271 }
272 // This CodeEmitInfo must not have the xhandlers because here the
273 // object is already locked (xhandlers expect object to be unlocked).
274 CodeEmitInfo* info = state_for (x, x->state(), true);
|
647 logic_op(x->op(), reg, left.result(), right.result());
648 }
649
650 // _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
// Three-way comparison producing an int result from two long/float/double
// operands.
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);
  if (x->x()->type()->is_float_kind()) {
    // The 'l' bytecode variants treat unordered operands as less-than;
    // the 'g' variants as greater-than.
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    ShouldNotReachHere();
  }
}
666
// Unsafe.compareAndSwap{Object,Int,Long}: atomically replace the field at
// obj+offset with 'val' if it currently equals 'cmp'. The intrinsic's result
// is a boolean (1 on success, 0 on failure). Object fields get GC write
// barriers around the CAS.
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this); // object
  LIRItem offset(x->argument_at(1), this); // offset of field
  LIRItem cmp   (x->argument_at(2), this); // Value to compare with field.
  LIRItem val   (x->argument_at(3), this); // Replace field with val if matches cmp.

  // Get address of field.
  obj.load_item();
  // Keep a constant offset only if it fits a signed 20-bit displacement.
  offset.load_nonconstant(20);
  cmp.load_item();
  val.load_item();

  LIR_Opr addr = new_pointer_register();
  LIR_Address* a;
  if (offset.result()->is_constant()) {
    assert(Immediate::is_simm20(offset.result()->as_jlong()), "should have been loaded into register");
    a = new LIR_Address(obj.result(),
                        offset.result()->as_jlong(),
                        as_BasicType(type));
  } else {
    a = new LIR_Address(obj.result(),
                        offset.result(),
                        0,
                        as_BasicType(type));
  }
  // Compute the effective address once; it is reused by the barriers below.
  __ leal(LIR_OprFact::address(a), addr);

  if (type == objectType) { // Write-barrier needed for Object fields.
    pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
  }

  LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience
  if (type == objectType) {
    __ cas_obj(addr, cmp.result(), val.result(), new_register(T_OBJECT), new_register(T_OBJECT));
  } else if (type == intType) {
    __ cas_int(addr, cmp.result(), val.result(), ill, ill);
  } else if (type == longType) {
    __ cas_long(addr, cmp.result(), val.result(), ill, ill);
  } else {
    ShouldNotReachHere();
  }
  // Generate conditional move of boolean result.
  LIR_Opr result = rlock_result(x);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, as_BasicType(type));
  if (type == objectType) { // Write-barrier needed for Object fields.
    // Precise card mark since could either be object or array
    post_barrier(addr, val.result());
  }
}
719
720
721 void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
722 switch (x->id()) {
723 case vmIntrinsics::_dabs:
724 case vmIntrinsics::_dsqrt: {
725 assert(x->number_of_arguments() == 1, "wrong type");
726 LIRItem value(x->argument_at(0), this);
727 value.load_item();
728 LIR_Opr dst = rlock_result(x);
729
730 switch (x->id()) {
731 case vmIntrinsics::_dsqrt: {
732 __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
733 break;
734 }
735 case vmIntrinsics::_dabs: {
736 __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
737 break;
738 }
|
569 logic_op(x->op(), reg, left.result(), right.result());
570 }
571
572 // _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
573 void LIRGenerator::do_CompareOp(CompareOp* x) {
574 LIRItem left(x->x(), this);
575 LIRItem right(x->y(), this);
576 left.load_item();
577 right.load_item();
578 LIR_Opr reg = rlock_result(x);
579 if (x->x()->type()->is_float_kind()) {
580 Bytecodes::Code code = x->op();
581 __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
582 } else if (x->x()->type()->tag() == longTag) {
583 __ lcmp2int(left.result(), right.result(), reg);
584 } else {
585 ShouldNotReachHere();
586 }
587 }
588
// Atomic compare-and-exchange at 'addr'. Returns a freshly allocated T_INT
// operand holding 1 on success and 0 on failure.
LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr t1 = LIR_OprFact::illegalOpr;
  LIR_Opr t2 = LIR_OprFact::illegalOpr;
  cmp_value.load_item();
  new_value.load_item();
  if (type == T_OBJECT) {
    if (UseCompressedOops) {
      // cas_obj needs two temps when oops are compressed.
      t1 = new_register(T_OBJECT);
      t2 = new_register(T_OBJECT);
    }
    __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else if (type == T_INT) {
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else if (type == T_LONG) {
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else {
    ShouldNotReachHere();
  }
  // Generate conditional move of boolean result.
  LIR_Opr result = new_register(T_INT);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, type);
  return result;
}
613
// Atomic exchange is not implemented on this platform; callers must not
// select this path.
LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  Unimplemented(); // Currently not supported on this platform.
  return LIR_OprFact::illegalOpr;
}
618
619 LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
620 LIR_Opr result = new_register(type);
621 value.load_item();
622 __ xadd(addr, value.result(), result, LIR_OprFact::illegalOpr);
623 return result;
624 }
625
626 void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
627 switch (x->id()) {
628 case vmIntrinsics::_dabs:
629 case vmIntrinsics::_dsqrt: {
630 assert(x->number_of_arguments() == 1, "wrong type");
631 LIRItem value(x->argument_at(0), this);
632 value.load_item();
633 LIR_Opr dst = rlock_result(x);
634
635 switch (x->id()) {
636 case vmIntrinsics::_dsqrt: {
637 __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
638 break;
639 }
640 case vmIntrinsics::_dabs: {
641 __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
642 break;
643 }
|
// The current thread lives in register Z_thread; expose it as a pointer operand.
LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(Z_thread);
}
1087
// Debug tracing: call the leaf runtime routine Runtime1::trace_block_entry
// with the block id passed in Z_R2.
void LIRGenerator::trace_block_entry(BlockBegin* block) {
  __ move(LIR_OprFact::intConst(block->block_id()), FrameMap::Z_R2_opr);
  LIR_OprList* args = new LIR_OprList(1);
  args->append(FrameMap::Z_R2_opr);
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
}
1095
// Volatile field store: emitted as a normal store on this platform
// (NOTE(review): any extra ordering appears to be handled elsewhere — confirm).
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  __ store(value, address, info);
}
1100
// Volatile field load: emitted as a normal load on this platform
// (NOTE(review): any extra ordering appears to be handled elsewhere — confirm).
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  __ load(address, result, info);
}
1105
1106
// Unsafe.put*: store 'data' of the given type at src + offset. Object stores
// are wrapped with the GC pre/post write barriers.
void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
                                     BasicType type, bool is_volatile) {
  LIR_Address* addr = new LIR_Address(src, offset, type);
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  if (is_obj) {
    // Do the pre-write barrier, if any.
    pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
    __ move(data, addr);
    assert(src->is_register(), "must be register");
    // Seems to be a precise address.
    post_barrier(LIR_OprFact::address(addr), data);
  } else {
    __ move(data, addr);
  }
}
1123
1124
1125 void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
1126 BasicType type, bool is_volatile) {
1127 LIR_Address* addr = new LIR_Address(src, offset, type);
1128 __ load(addr, dst);
1129 }
1130
// Unsafe getAndAdd{Int,Long}: atomic fetch-and-add at src + offset, returning
// the previous value. getAndSet and object types are not supported here.
void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
  BasicType type = x->basic_type();
  assert (x->is_add() && type != T_ARRAY && type != T_OBJECT, "not supported");
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem value(x->value(), this);

  src.load_item();
  value.load_item();
  // Keep a constant offset only if it fits a signed 20-bit displacement.
  off.load_nonconstant(20);

  LIR_Opr dst = rlock_result(x, type);
  LIR_Opr data = value.result();
  LIR_Opr offset = off.result();

  LIR_Address* addr;
  if (offset->is_constant()) {
    assert(Immediate::is_simm20(offset->as_jlong()), "should have been loaded into register");
    addr = new LIR_Address(src.result(), offset->as_jlong(), type);
  } else {
    addr = new LIR_Address(src.result(), offset, type);
  }

  __ xadd(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr);
}
1156
1157 void LIRGenerator::do_update_CRC32(Intrinsic* x) {
1158 assert(UseCRC32Intrinsics, "or should not be here");
1159 LIR_Opr result = rlock_result(x);
1160
1161 switch (x->id()) {
1162 case vmIntrinsics::_updateCRC32: {
1163 LIRItem crc(x->argument_at(0), this);
1164 LIRItem val(x->argument_at(1), this);
1165 // Registers destroyed by update_crc32.
1166 crc.set_destroys_register();
1167 val.set_destroys_register();
1168 crc.load_item();
1169 val.load_item();
1170 __ update_crc32(crc.result(), val.result(), result);
1171 break;
1172 }
1173 case vmIntrinsics::_updateBytesCRC32:
|
// The current thread lives in register Z_thread; expose it as a pointer operand.
LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(Z_thread);
}
992
// Debug tracing: call the leaf runtime routine Runtime1::trace_block_entry
// with the block id passed in Z_R2.
void LIRGenerator::trace_block_entry(BlockBegin* block) {
  __ move(LIR_OprFact::intConst(block->block_id()), FrameMap::Z_R2_opr);
  LIR_OprList* args = new LIR_OprList(1);
  args->append(FrameMap::Z_R2_opr);
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
}
1000
// Volatile field store: emitted as a normal store on this platform
// (NOTE(review): any extra ordering appears to be handled elsewhere — confirm).
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  __ store(value, address, info);
}
1005
// Volatile field load: emitted as a normal load on this platform
// (NOTE(review): any extra ordering appears to be handled elsewhere — confirm).
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  __ load(address, result, info);
}
1010
1011 void LIRGenerator::do_update_CRC32(Intrinsic* x) {
1012 assert(UseCRC32Intrinsics, "or should not be here");
1013 LIR_Opr result = rlock_result(x);
1014
1015 switch (x->id()) {
1016 case vmIntrinsics::_updateCRC32: {
1017 LIRItem crc(x->argument_at(0), this);
1018 LIRItem val(x->argument_at(1), this);
1019 // Registers destroyed by update_crc32.
1020 crc.set_destroys_register();
1021 val.set_destroys_register();
1022 crc.load_item();
1023 val.load_item();
1024 __ update_crc32(crc.result(), val.result(), result);
1025 break;
1026 }
1027 case vmIntrinsics::_updateBytesCRC32:
|