15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #include "precompiled.hpp"
25 #include "c1/c1_Compilation.hpp"
26 #include "c1/c1_FrameMap.hpp"
27 #include "c1/c1_Instruction.hpp"
28 #include "c1/c1_LIRAssembler.hpp"
29 #include "c1/c1_LIRGenerator.hpp"
30 #include "c1/c1_Runtime1.hpp"
31 #include "c1/c1_ValueStack.hpp"
32 #include "ci/ciArray.hpp"
33 #include "ci/ciObjArrayKlass.hpp"
34 #include "ci/ciTypeArrayKlass.hpp"
35 #include "runtime/sharedRuntime.hpp"
36 #include "runtime/stubRoutines.hpp"
37 #include "vmreg_x86.inline.hpp"
38
39 #ifdef ASSERT
40 #define __ gen()->lir(__FILE__, __LINE__)->
41 #else
42 #define __ gen()->lir()->
43 #endif
44
45 // Item will be loaded into a byte register; Intel only
46 void LIRItem::load_byte_item() {
47 load_item();
48 LIR_Opr res = result();
49
50 if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
51 // make sure that it is a byte register
52 assert(!value()->type()->is_float() && !value()->type()->is_double(),
53 "can't load floats in byte register");
|
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #include "precompiled.hpp"
25 #include "c1/c1_Compilation.hpp"
26 #include "c1/c1_FrameMap.hpp"
27 #include "c1/c1_Instruction.hpp"
28 #include "c1/c1_LIRAssembler.hpp"
29 #include "c1/c1_LIRGenerator.hpp"
30 #include "c1/c1_Runtime1.hpp"
31 #include "c1/c1_ValueStack.hpp"
32 #include "ci/ciArray.hpp"
33 #include "ci/ciObjArrayKlass.hpp"
34 #include "ci/ciTypeArrayKlass.hpp"
35 #include "gc/shared/c1/barrierSetC1.hpp"
36 #include "runtime/sharedRuntime.hpp"
37 #include "runtime/stubRoutines.hpp"
38 #include "vmreg_x86.inline.hpp"
39
40 #ifdef ASSERT
41 #define __ gen()->lir(__FILE__, __LINE__)->
42 #else
43 #define __ gen()->lir()->
44 #endif
45
46 // Item will be loaded into a byte register; Intel only
47 void LIRItem::load_byte_item() {
48 load_item();
49 LIR_Opr res = result();
50
51 if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
52 // make sure that it is a byte register
53 assert(!value()->type()->is_float() && !value()->type()->is_double(),
54 "can't load floats in byte register");
|
134 (v->type()->is_constant() && v->type()->as_ObjectType()->constant_value()->is_null_object());
135 }
136
137
138 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
139 if (c->type() == T_LONG) return false;
140 return c->type() != T_OBJECT || c->as_jobject() == NULL;
141 }
142
143
144 LIR_Opr LIRGenerator::safepoint_poll_register() {
145 NOT_LP64( if (SafepointMechanism::uses_thread_local_poll()) { return new_register(T_ADDRESS); } )
146 return LIR_OprFact::illegalOpr;
147 }
148
149
150 LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
151 int shift, int disp, BasicType type) {
152 assert(base->is_register(), "must be");
153 if (index->is_constant()) {
154 return new LIR_Address(base,
155 ((intx)(index->as_constant_ptr()->as_jint()) << shift) + disp,
156 type);
157 } else {
158 return new LIR_Address(base, index, (LIR_Address::Scale)shift, disp, type);
159 }
160 }
161
162
// Build an address for an array element: array base + header offset +
// index * element size. A constant index is folded into the displacement;
// a variable index is used with a scale (widened to 64 bits first on LP64).
// When needs_card_mark is set, the full element address is materialized into
// a pointer register so the identical (precise) address can be reused by the
// card-marking code after the store.
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type, bool needs_card_mark) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    int elem_size = type2aelembytes(type);
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
  } else {
#ifdef _LP64
    if (index_opr->type() == T_INT) {
      // Addresses are 64-bit wide: sign-extend a 32-bit index first.
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
#endif // _LP64
    addr = new LIR_Address(array_opr,
                           index_opr,
                           LIR_Address::scale(type),
                           offset_in_bytes, type);
  }
  if (needs_card_mark) {
    // This store will need a precise card mark, so go ahead and
    // compute the full address instead of computing once for the
    // store and again for the card mark.
    LIR_Opr tmp = new_pointer_register();
    __ leal(LIR_OprFact::address(addr), tmp);
    return new LIR_Address(tmp, type);
  } else {
    return addr;
  }
}
196
197
198 LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
199 LIR_Opr r = NULL;
200 if (type == T_LONG) {
201 r = LIR_OprFact::longConst(x);
202 } else if (type == T_INT) {
203 r = LIR_OprFact::intConst(x);
204 } else {
205 ShouldNotReachHere();
206 }
207 return r;
208 }
209
210 void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
211 LIR_Opr pointer = new_pointer_register();
212 __ move(LIR_OprFact::intptrConst(counter), pointer);
213 LIR_Address* addr = new LIR_Address(pointer, type);
|
135 (v->type()->is_constant() && v->type()->as_ObjectType()->constant_value()->is_null_object());
136 }
137
138
139 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
140 if (c->type() == T_LONG) return false;
141 return c->type() != T_OBJECT || c->as_jobject() == NULL;
142 }
143
144
145 LIR_Opr LIRGenerator::safepoint_poll_register() {
146 NOT_LP64( if (SafepointMechanism::uses_thread_local_poll()) { return new_register(T_ADDRESS); } )
147 return LIR_OprFact::illegalOpr;
148 }
149
150
// Build a base + index*2^shift + disp address for 'type'-sized access.
// A constant index (int or long) is folded into the displacement when the
// folded value fits the 32-bit x86 displacement field; otherwise it is
// materialized into a long register and used as a register index.
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  if (index->is_constant()) {
    LIR_Const *constant = index->as_constant_ptr();
#ifdef _LP64
    jlong c;
    if (constant->type() == T_INT) {
      c = (jlong(index->as_jint()) << shift) + disp;
    } else {
      assert(constant->type() == T_LONG, "should be");
      c = (index->as_jlong() << shift) + disp;
    }
    if ((jlong)((jint)c) == c) {
      // Folded offset fits in 32 bits: encode as a plain displacement.
      return new LIR_Address(base, (jint)c, type);
    } else {
      // Offset overflows 32 bits: keep the index in a register.
      LIR_Opr tmp = new_register(T_LONG);
      __ move(index, tmp);
      return new LIR_Address(base, tmp, type);
    }
#else
    // 32-bit: intx is 32 bits, so the fold always fits the displacement.
    return new LIR_Address(base,
                           ((intx)(constant->as_jint()) << shift) + disp,
                           type);
#endif
  } else {
    return new LIR_Address(base, index, (LIR_Address::Scale)shift, disp, type);
  }
}
180
181
// Build an address for an array element: array base + header offset +
// index * element size. A constant index is folded into the displacement;
// a variable index is used with a scale (widened to 64 bits first on LP64).
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    int elem_size = type2aelembytes(type);
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
  } else {
#ifdef _LP64
    if (index_opr->type() == T_INT) {
      // Addresses are 64-bit wide: sign-extend a 32-bit index first.
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
#endif // _LP64
    addr = new LIR_Address(array_opr,
                           index_opr,
                           LIR_Address::scale(type),
                           offset_in_bytes, type);
  }
  return addr;
}
206
207
208 LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
209 LIR_Opr r = NULL;
210 if (type == T_LONG) {
211 r = LIR_OprFact::longConst(x);
212 } else if (type == T_INT) {
213 r = LIR_OprFact::intConst(x);
214 } else {
215 ShouldNotReachHere();
216 }
217 return r;
218 }
219
220 void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
221 LIR_Opr pointer = new_pointer_register();
222 __ move(LIR_OprFact::intptrConst(counter), pointer);
223 LIR_Address* addr = new LIR_Address(pointer, type);
|
235 __ move(left, tmp);
236 __ shift_left(left, log2_intptr(c + 1), left);
237 __ sub(left, tmp, result);
238 return true;
239 } else if (is_power_of_2(c - 1)) {
240 __ move(left, tmp);
241 __ shift_left(left, log2_intptr(c - 1), left);
242 __ add(left, tmp, result);
243 return true;
244 }
245 }
246 return false;
247 }
248
249
250 void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
251 BasicType type = item->type();
252 __ store(item, new LIR_Address(FrameMap::rsp_opr, in_bytes(offset_from_sp), type));
253 }
254
255 //----------------------------------------------------------------------
256 // visitor functions
257 //----------------------------------------------------------------------
258
259
// Generate LIR for an array element store (*astore bytecodes): operand
// loading, optional range/null checks, the dynamic array store check for
// oop arrays, boolean masking, and GC write barriers around oop stores.
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(),"");
  bool needs_range_check = x->compute_needs_range_check();
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  // The store check is only skippable for a constant null value with no
  // profiling requested.
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object() ||
                                         x->should_profile());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();

  }
  if (needs_store_check || x->check_boolean()) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // the CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  // emit array address setup early so it schedules better
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      // Length is already available: compare directly, branch to the
      // range-check stub on index >= length (unsigned compare).
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    // store_check requires three fresh temporaries.
    LIR_Opr tmp1 = new_register(objectType);
    LIR_Opr tmp2 = new_register(objectType);
    LIR_Opr tmp3 = new_register(objectType);

    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
  }

  if (obj_store) {
    // Needs GC write barriers.
    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
    __ move(value.result(), array_addr, null_check_info);
    // Seems to be a precise
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  } else {
    // Non-oop store: optionally mask a boolean value down to 0/1 first.
    LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
    __ move(result, array_addr, null_check_info);
  }
}
334
335
336 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
337 assert(x->is_pinned(),"");
338 LIRItem obj(x->obj(), this);
339 obj.load_item();
340
341 set_no_result(x);
342
343 // "lock" stores the address of the monitor stack slot, so this is not an oop
344 LIR_Opr lock = new_register(T_INT);
345 // Need a scratch register for biased locking on x86
346 LIR_Opr scratch = LIR_OprFact::illegalOpr;
347 if (UseBiasedLocking) {
348 scratch = new_register(T_INT);
349 }
350
351 CodeEmitInfo* info_for_exception = NULL;
352 if (x->needs_null_check()) {
353 info_for_exception = state_for(x);
354 }
|
245 __ move(left, tmp);
246 __ shift_left(left, log2_intptr(c + 1), left);
247 __ sub(left, tmp, result);
248 return true;
249 } else if (is_power_of_2(c - 1)) {
250 __ move(left, tmp);
251 __ shift_left(left, log2_intptr(c - 1), left);
252 __ add(left, tmp, result);
253 return true;
254 }
255 }
256 return false;
257 }
258
259
260 void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
261 BasicType type = item->type();
262 __ store(item, new LIR_Address(FrameMap::rsp_opr, in_bytes(offset_from_sp), type));
263 }
264
265 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int pro
266 LIR_Opr tmp1 = new_register(objectType);
267 LIR_Opr tmp2 = new_register(objectType);
268 LIR_Opr tmp3 = new_register(objectType);
269 __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
270 }
271
272 //----------------------------------------------------------------------
273 // visitor functions
274 //----------------------------------------------------------------------
275
276 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
277 assert(x->is_pinned(),"");
278 LIRItem obj(x->obj(), this);
279 obj.load_item();
280
281 set_no_result(x);
282
283 // "lock" stores the address of the monitor stack slot, so this is not an oop
284 LIR_Opr lock = new_register(T_INT);
285 // Need a scratch register for biased locking on x86
286 LIR_Opr scratch = LIR_OprFact::illegalOpr;
287 if (UseBiasedLocking) {
288 scratch = new_register(T_INT);
289 }
290
291 CodeEmitInfo* info_for_exception = NULL;
292 if (x->needs_null_check()) {
293 info_for_exception = state_for(x);
294 }
|
697 LIRItem left(x->x(), this);
698 LIRItem right(x->y(), this);
699 ValueTag tag = x->x()->type()->tag();
700 if (tag == longTag) {
701 left.set_destroys_register();
702 }
703 left.load_item();
704 right.load_item();
705 LIR_Opr reg = rlock_result(x);
706
707 if (x->x()->type()->is_float_kind()) {
708 Bytecodes::Code code = x->op();
709 __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
710 } else if (x->x()->type()->tag() == longTag) {
711 __ lcmp2int(left.result(), right.result(), reg);
712 } else {
713 Unimplemented();
714 }
715 }
716
717
// Intrinsic for Unsafe.compareAndSwap{Object,Int,Long}: arguments are
// (object, offset, expected, new). Emits a CAS on the field address and
// produces a 0/1 int result; oop CAS is wrapped in GC write barriers.
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp

  assert(obj.type()->tag() == objectTag, "invalid type");

  // In 64bit the type can be long, sparc doesn't have this assert
  // assert(offset.type()->tag() == intTag, "invalid type");

  assert(cmp.type()->tag() == type->tag(), "invalid type");
  assert(val.type()->tag() == type->tag(), "invalid type");

  // get address of field
  obj.load_item();
  offset.load_nonconstant();

  LIR_Opr addr = new_pointer_register();
  LIR_Address* a;
  if(offset.result()->is_constant()) {
#ifdef _LP64
    jlong c = offset.result()->as_jlong();
    if ((jlong)((jint)c) == c) {
      // Constant offset fits the 32-bit displacement field.
      a = new LIR_Address(obj.result(),
                          (jint)c,
                          as_BasicType(type));
    } else {
      // Offset too large for a displacement: keep it in a register.
      LIR_Opr tmp = new_register(T_LONG);
      __ move(offset.result(), tmp);
      a = new LIR_Address(obj.result(),
                          tmp,
                          as_BasicType(type));
    }
#else
    a = new LIR_Address(obj.result(),
                        offset.result()->as_jint(),
                        as_BasicType(type));
#endif
  } else {
    a = new LIR_Address(obj.result(),
                        offset.result(),
                        0,
                        as_BasicType(type));
  }
  // Materialize the full field address once; it is reused by the CAS and
  // by the write barriers below.
  __ leal(LIR_OprFact::address(a), addr);

  if (type == objectType) {  // Write-barrier needed for Object fields.
    // Do the pre-write barrier, if any.
    pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
  }

  // x86 CMPXCHG requires the expected value in rax (or the fixed long
  // register pairs on 32-bit), hence the load_item_force calls.
  if (type == objectType) {
    cmp.load_item_force(FrameMap::rax_oop_opr);
    val.load_item();
  } else if (type == intType) {
    cmp.load_item_force(FrameMap::rax_opr);
    val.load_item();
  } else if (type == longType) {
    cmp.load_item_force(FrameMap::long0_opr);
    val.load_item_force(FrameMap::long1_opr);
  } else {
    ShouldNotReachHere();
  }

  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  if (type == objectType)
    __ cas_obj(addr, cmp.result(), val.result(), ill, ill);
  else if (type == intType)
    __ cas_int(addr, cmp.result(), val.result(), ill, ill);
  else if (type == longType)
    __ cas_long(addr, cmp.result(), val.result(), ill, ill);
  else {
    ShouldNotReachHere();
  }

  // generate conditional move of boolean result
  LIR_Opr result = rlock_result(x);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, as_BasicType(type));
  if (type == objectType) {  // Write-barrier needed for Object fields.
    // Seems to be precise
    post_barrier(addr, val.result());
  }
}
805
806 void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
807 assert(x->number_of_arguments() == 3, "wrong type");
808 assert(UseFMA, "Needs FMA instructions support.");
809 LIRItem value(x->argument_at(0), this);
810 LIRItem value1(x->argument_at(1), this);
811 LIRItem value2(x->argument_at(2), this);
812
813 value2.set_destroys_register();
814
815 value.load_item();
816 value1.load_item();
817 value2.load_item();
818
819 LIR_Opr calc_input = value.result();
820 LIR_Opr calc_input1 = value1.result();
821 LIR_Opr calc_input2 = value2.result();
822 LIR_Opr calc_result = rlock_result(x);
|
637 LIRItem left(x->x(), this);
638 LIRItem right(x->y(), this);
639 ValueTag tag = x->x()->type()->tag();
640 if (tag == longTag) {
641 left.set_destroys_register();
642 }
643 left.load_item();
644 right.load_item();
645 LIR_Opr reg = rlock_result(x);
646
647 if (x->x()->type()->is_float_kind()) {
648 Bytecodes::Code code = x->op();
649 __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
650 } else if (x->x()->type()->tag() == longTag) {
651 __ lcmp2int(left.result(), right.result(), reg);
652 } else {
653 Unimplemented();
654 }
655 }
656
// Emit a compare-and-swap on the memory location 'addr'. The expected value
// is forced into the fixed register(s) x86 CMPXCHG requires (rax, or the
// long0/long1 pairs on 32-bit); the comparison outcome is converted to a
// 0/1 T_INT result via a conditional move.
LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  if (type == T_OBJECT || type == T_ARRAY) {
    cmp_value.load_item_force(FrameMap::rax_oop_opr);
    new_value.load_item();
    __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_INT) {
    cmp_value.load_item_force(FrameMap::rax_opr);
    new_value.load_item();
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_LONG) {
    cmp_value.load_item_force(FrameMap::long0_opr);
    new_value.load_item_force(FrameMap::long1_opr);
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else {
    Unimplemented();
  }
  // Turn the CAS outcome into a 0/1 int result.
  LIR_Opr result = new_register(T_INT);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, type);
  return result;
}
679
680 LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
681 bool is_oop = type == T_OBJECT || type == T_ARRAY;
682 LIR_Opr result = new_register(type);
683 value.load_item();
684 // Because we want a 2-arg form of xchg and xadd
685 __ move(value.result(), result);
686 assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
687 __ xchg(addr, result, result, LIR_OprFact::illegalOpr);
688 return result;
689 }
690
691 LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
692 LIR_Opr result = new_register(type);
693 value.load_item();
694 // Because we want a 2-arg form of xchg and xadd
695 __ move(value.result(), result);
696 assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
697 __ xadd(addr, result, result, LIR_OprFact::illegalOpr);
698 return result;
699 }
700
701 void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
702 assert(x->number_of_arguments() == 3, "wrong type");
703 assert(UseFMA, "Needs FMA instructions support.");
704 LIRItem value(x->argument_at(0), this);
705 LIRItem value1(x->argument_at(1), this);
706 LIRItem value2(x->argument_at(2), this);
707
708 value2.set_destroys_register();
709
710 value.load_item();
711 value1.load_item();
712 value2.load_item();
713
714 LIR_Opr calc_input = value.result();
715 LIR_Opr calc_input1 = value1.result();
716 LIR_Opr calc_input2 = value2.result();
717 LIR_Opr calc_result = rlock_result(x);
|
1552 CodeEmitInfo* info) {
1553 if (address->type() == T_LONG) {
1554 address = new LIR_Address(address->base(),
1555 address->index(), address->scale(),
1556 address->disp(), T_DOUBLE);
1557 // Transfer the value atomically by using FP moves. This means
1558 // the value has to be moved between CPU and FPU registers. It
1559 // always has to be moved through spill slot since there's no
1560 // quick way to pack the value into an SSE register.
1561 LIR_Opr temp_double = new_register(T_DOUBLE);
1562 LIR_Opr spill = new_register(T_LONG);
1563 set_vreg_flag(spill, must_start_in_memory);
1564 __ move(value, spill);
1565 __ volatile_move(spill, temp_double, T_LONG);
1566 __ volatile_move(temp_double, LIR_OprFact::address(address), T_LONG, info);
1567 } else {
1568 __ store(value, address, info);
1569 }
1570 }
1571
1572
1573
// Load a volatile field. A 64-bit volatile load on x86 must be one atomic
// access, so it is performed as a T_DOUBLE (FP/SSE) move and then
// transferred to the CPU register(s).
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  if (address->type() == T_LONG) {
    // Retype the access as T_DOUBLE so it goes through an FP register.
    address = new LIR_Address(address->base(),
                              address->index(), address->scale(),
                              address->disp(), T_DOUBLE);
    // Transfer the value atomically by using FP moves. This means
    // the value has to be moved between CPU and FPU registers. In
    // SSE0 and SSE1 mode it has to be moved through spill slot but in
    // SSE2+ mode it can be moved directly.
    LIR_Opr temp_double = new_register(T_DOUBLE);
    __ volatile_move(LIR_OprFact::address(address), temp_double, T_LONG, info);
    __ volatile_move(temp_double, result, T_LONG);
    if (UseSSE < 2) {
      // no spill slot needed in SSE2 mode because xmm->cpu register move is possible
      set_vreg_flag(result, must_start_in_memory);
    }
  } else {
    // Other sizes are naturally atomic: a plain load suffices.
    __ load(address, result, info);
  }
}
1595
1596 void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
1597 BasicType type, bool is_volatile) {
1598 if (is_volatile && type == T_LONG) {
1599 LIR_Address* addr = new LIR_Address(src, offset, T_DOUBLE);
1600 LIR_Opr tmp = new_register(T_DOUBLE);
1601 __ load(addr, tmp);
1602 LIR_Opr spill = new_register(T_LONG);
1603 set_vreg_flag(spill, must_start_in_memory);
1604 __ move(tmp, spill);
1605 __ move(spill, dst);
1606 } else {
1607 LIR_Address* addr = new LIR_Address(src, offset, type);
1608 __ load(addr, dst);
1609 }
1610 }
1611
1612
// Unsafe field/element write of 'data' to (src, offset). A volatile 64-bit
// write must be a single atomic access, so the value travels through a
// spill slot and an FP register; oop writes are wrapped in GC barriers.
void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
                                     BasicType type, bool is_volatile) {
  if (is_volatile && type == T_LONG) {
    // Atomic 64-bit store: move the long through a spill slot into an FP
    // register and store it as a T_DOUBLE.
    LIR_Address* addr = new LIR_Address(src, offset, T_DOUBLE);
    LIR_Opr tmp = new_register(T_DOUBLE);
    LIR_Opr spill = new_register(T_DOUBLE);
    set_vreg_flag(spill, must_start_in_memory);
    __ move(data, spill);
    __ move(spill, tmp);
    __ move(tmp, addr);
  } else {
    LIR_Address* addr = new LIR_Address(src, offset, type);
    bool is_obj = (type == T_ARRAY || type == T_OBJECT);
    if (is_obj) {
      // Do the pre-write barrier, if any.
      pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                  true /* do_load */, false /* patch */, NULL);
      __ move(data, addr);
      assert(src->is_register(), "must be register");
      // Seems to be a precise address
      post_barrier(LIR_OprFact::address(addr), data);
    } else {
      __ move(data, addr);
    }
  }
}
1639
// Unsafe getAndSet*/getAndAdd* intrinsic: atomically exchange (or add to)
// the field at (object, offset) and return the previous value. Oop
// exchanges are wrapped in GC write barriers.
void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem value(x->value(), this);

  src.load_item();
  value.load_item();
  off.load_nonconstant();

  LIR_Opr dst = rlock_result(x, type);
  LIR_Opr data = value.result();
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  LIR_Opr offset = off.result();

  // xadd only supports int/long; oop values are only valid for xchg.
  assert (type == T_INT || (!x->is_add() && is_obj) LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Address* addr;
  if (offset->is_constant()) {
#ifdef _LP64
    jlong c = offset->as_jlong();
    if ((jlong)((jint)c) == c) {
      // Constant offset fits the 32-bit displacement field.
      addr = new LIR_Address(src.result(), (jint)c, type);
    } else {
      // Offset too large for a displacement: keep it in a register.
      LIR_Opr tmp = new_register(T_LONG);
      __ move(offset, tmp);
      addr = new LIR_Address(src.result(), tmp, type);
    }
#else
    addr = new LIR_Address(src.result(), offset->as_jint(), type);
#endif
  } else {
    addr = new LIR_Address(src.result(), offset, type);
  }

  // Because we want a 2-arg form of xchg and xadd
  __ move(data, dst);

  if (x->is_add()) {
    __ xadd(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
  } else {
    if (is_obj) {
      // Do the pre-write barrier, if any.
      pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                  true /* do_load */, false /* patch */, NULL);
    }
    __ xchg(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
    if (is_obj) {
      // Seems to be a precise address
      post_barrier(LIR_OprFact::address(addr), data);
    }
  }
}
|
1447 CodeEmitInfo* info) {
1448 if (address->type() == T_LONG) {
1449 address = new LIR_Address(address->base(),
1450 address->index(), address->scale(),
1451 address->disp(), T_DOUBLE);
1452 // Transfer the value atomically by using FP moves. This means
1453 // the value has to be moved between CPU and FPU registers. It
1454 // always has to be moved through spill slot since there's no
1455 // quick way to pack the value into an SSE register.
1456 LIR_Opr temp_double = new_register(T_DOUBLE);
1457 LIR_Opr spill = new_register(T_LONG);
1458 set_vreg_flag(spill, must_start_in_memory);
1459 __ move(value, spill);
1460 __ volatile_move(spill, temp_double, T_LONG);
1461 __ volatile_move(temp_double, LIR_OprFact::address(address), T_LONG, info);
1462 } else {
1463 __ store(value, address, info);
1464 }
1465 }
1466
// Load a volatile field. A 64-bit volatile load on x86 must be one atomic
// access, so it is performed as a T_DOUBLE (FP/SSE) move and then
// transferred to the CPU register(s).
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  if (address->type() == T_LONG) {
    // Retype the access as T_DOUBLE so it goes through an FP register.
    address = new LIR_Address(address->base(),
                              address->index(), address->scale(),
                              address->disp(), T_DOUBLE);
    // Transfer the value atomically by using FP moves. This means
    // the value has to be moved between CPU and FPU registers. In
    // SSE0 and SSE1 mode it has to be moved through spill slot but in
    // SSE2+ mode it can be moved directly.
    LIR_Opr temp_double = new_register(T_DOUBLE);
    __ volatile_move(LIR_OprFact::address(address), temp_double, T_LONG, info);
    __ volatile_move(temp_double, result, T_LONG);
    if (UseSSE < 2) {
      // no spill slot needed in SSE2 mode because xmm->cpu register move is possible
      set_vreg_flag(result, must_start_in_memory);
    }
  } else {
    // Other sizes are naturally atomic: a plain load suffices.
    __ load(address, result, info);
  }
}
|