0 /*
1 * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
2 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
|
0 /*
1 * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
2 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
|
126 return false;
127 }
128 }
129
130
131 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { return false; }
132
133
134 LIR_Opr LIRGenerator::safepoint_poll_register() {
135 return LIR_OprFact::illegalOpr;
136 }
137
138
139 LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
140 int shift, int disp, BasicType type) {
141 assert(base->is_register(), "must be");
142 intx large_disp = disp;
143
144 // accumulate fixed displacements
145 if (index->is_constant()) {
146 large_disp += (intx)(index->as_constant_ptr()->as_jint()) << shift;
147 index = LIR_OprFact::illegalOpr;
148 }
149
150 if (index->is_register()) {
151 // apply the shift and accumulate the displacement
152 if (shift > 0) {
153 LIR_Opr tmp = new_pointer_register();
154 __ shift_left(index, shift, tmp);
155 index = tmp;
156 }
157 if (large_disp != 0) {
158 LIR_Opr tmp = new_pointer_register();
159 if (Assembler::operand_valid_for_add_sub_immediate(large_disp)) {
160 __ add(tmp, tmp, LIR_OprFact::intptrConst(large_disp));
161 index = tmp;
162 } else {
163 __ move(tmp, LIR_OprFact::intptrConst(large_disp));
164 __ add(tmp, index, tmp);
165 index = tmp;
166 }
167 large_disp = 0;
168 }
169 } else if (large_disp != 0 && !Address::offset_ok_for_immed(large_disp, shift)) {
170 // index is illegal so replace it with the displacement loaded into a register
171 index = new_pointer_register();
172 __ move(LIR_OprFact::intptrConst(large_disp), index);
173 large_disp = 0;
174 }
175
176 // at this point we either have base + index or base + displacement
177 if (large_disp == 0) {
178 return new LIR_Address(base, index, type);
179 } else {
180 assert(Address::offset_ok_for_immed(large_disp, 0), "must be");
181 return new LIR_Address(base, large_disp, type);
182 }
183 }
184
185
// Form the address of array element index_opr in array_opr.
// Constant indices are folded into the displacement; otherwise the array
// header offset is pre-added into a temp so the scaled-index addressing
// mode can be used. With needs_card_mark the full element address is
// materialized (leal) so the same precise address feeds both the store
// and its card mark.
186 LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
187 BasicType type, bool needs_card_mark) {
188 int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type); // header size for this element type
189 int elem_size = type2aelembytes(type);
190 int shift = exact_log2(elem_size); // element sizes are powers of two
191
192 LIR_Address* addr;
193 if (index_opr->is_constant()) {
194 addr = new LIR_Address(array_opr,
195 offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type); // fully folded displacement
196 } else {
197 if (offset_in_bytes) {
198 LIR_Opr tmp = new_pointer_register();
199 __ add(array_opr, LIR_OprFact::intConst(offset_in_bytes), tmp); // base' = array + header
200 array_opr = tmp;
201 offset_in_bytes = 0;
202 }
203 addr = new LIR_Address(array_opr,
204 index_opr,
205 LIR_Address::scale(type),
206 offset_in_bytes, type);
207 }
208 if (needs_card_mark) {
209 // This store will need a precise card mark, so go ahead and
210 // compute the full address instead of computing once for the
211 // store and again for the card mark.
212 LIR_Opr tmp = new_pointer_register();
213 __ leal(LIR_OprFact::address(addr), tmp); // materialize element address
214 return new LIR_Address(tmp, type);
215 } else {
216 return addr;
217 }
218 }
219
220 LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
221 LIR_Opr r;
222 if (type == T_LONG) {
223 r = LIR_OprFact::longConst(x);
224 if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
225 LIR_Opr tmp = new_register(type);
226 __ move(r, tmp);
227 return tmp;
228 }
229 } else if (type == T_INT) {
230 r = LIR_OprFact::intConst(x);
231 if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
232 // This is all rather nasty. We don't know whether our constant
233 // is required for a logical or an arithmetic operation, wo we
234 // don't know what the range of valid values is!!
235 LIR_Opr tmp = new_register(type);
236 __ move(r, tmp);
|
126 return false;
127 }
128 }
129
130
131 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { return false; }
132
133
134 LIR_Opr LIRGenerator::safepoint_poll_register() {
135 return LIR_OprFact::illegalOpr;
136 }
137
138
139 LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
140 int shift, int disp, BasicType type) {
141 assert(base->is_register(), "must be");
142 intx large_disp = disp;
143
144 // accumulate fixed displacements
145 if (index->is_constant()) {
146 LIR_Const *constant = index->as_constant_ptr();
147 if (constant->type() == T_INT) {
148 large_disp += index->as_jint() << shift;
149 } else {
150 assert(constant->type() == T_LONG, "should be");
151 jlong c = index->as_jlong() << shift;
152 if ((jlong)((jint)c) == c) {
153 large_disp += c;
154 index = LIR_OprFact::illegalOpr;
155 } else {
156 LIR_Opr tmp = new_register(T_LONG);
157 __ move(index, tmp);
158 index = tmp;
159 // apply shift and displacement below
160 }
161 }
162 }
163
164 if (index->is_register()) {
165 // apply the shift and accumulate the displacement
166 if (shift > 0) {
167 LIR_Opr tmp = new_pointer_register();
168 __ shift_left(index, shift, tmp);
169 index = tmp;
170 }
171 if (large_disp != 0) {
172 LIR_Opr tmp = new_pointer_register();
173 if (Assembler::operand_valid_for_add_sub_immediate(large_disp)) {
174 __ add(tmp, tmp, LIR_OprFact::intptrConst(large_disp));
175 index = tmp;
176 } else {
177 __ move(tmp, LIR_OprFact::intptrConst(large_disp));
178 __ add(tmp, index, tmp);
179 index = tmp;
180 }
181 large_disp = 0;
182 }
183 } else if (large_disp != 0 && !Address::offset_ok_for_immed(large_disp, shift)) {
184 // index is illegal so replace it with the displacement loaded into a register
185 index = new_pointer_register();
186 __ move(LIR_OprFact::intptrConst(large_disp), index);
187 large_disp = 0;
188 }
189
190 // at this point we either have base + index or base + displacement
191 if (large_disp == 0) {
192 return new LIR_Address(base, index, type);
193 } else {
194 assert(Address::offset_ok_for_immed(large_disp, 0), "must be");
195 return new LIR_Address(base, large_disp, type);
196 }
197 }
198
199 LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
200 BasicType type) {
201 int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
202 int elem_size = type2aelembytes(type);
203 int shift = exact_log2(elem_size);
204
205 LIR_Address* addr;
206 if (index_opr->is_constant()) {
207 addr = new LIR_Address(array_opr,
208 offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
209 } else {
210 if (offset_in_bytes) {
211 LIR_Opr tmp = new_pointer_register();
212 __ add(array_opr, LIR_OprFact::intConst(offset_in_bytes), tmp);
213 array_opr = tmp;
214 offset_in_bytes = 0;
215 }
216 addr = new LIR_Address(array_opr,
217 index_opr,
218 LIR_Address::scale(type),
219 offset_in_bytes, type);
220 }
221 return addr;
222 }
223
224 LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
225 LIR_Opr r;
226 if (type == T_LONG) {
227 r = LIR_OprFact::longConst(x);
228 if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
229 LIR_Opr tmp = new_register(type);
230 __ move(r, tmp);
231 return tmp;
232 }
233 } else if (type == T_INT) {
234 r = LIR_OprFact::intConst(x);
235 if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
236 // This is all rather nasty. We don't know whether our constant
237 // is required for a logical or an arithmetic operation, wo we
238 // don't know what the range of valid values is!!
239 LIR_Opr tmp = new_register(type);
240 __ move(r, tmp);
|
287 bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
288
289 if (is_power_of_2(c - 1)) {
290 __ shift_left(left, exact_log2(c - 1), tmp);
291 __ add(tmp, left, result);
292 return true;
293 } else if (is_power_of_2(c + 1)) {
294 __ shift_left(left, exact_log2(c + 1), tmp);
295 __ sub(tmp, left, result);
296 return true;
297 } else {
298 return false;
299 }
300 }
301
302 void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
303 BasicType type = item->type();
304 __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
305 }
306
307 //----------------------------------------------------------------------
308 // visitor functions
309 //----------------------------------------------------------------------
310
311
// Generate LIR for an array element store (aastore/iastore/...):
// loads array/index/value, emits the range check, the array store check
// for object stores, and the GC pre/post barriers around the store.
312 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
313 assert(x->is_pinned(),"");
314 bool needs_range_check = x->compute_needs_range_check();
315 bool use_length = x->length() != NULL; // length available without reloading from the array
316 bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
317 bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
318 !get_jobject_constant(x->value())->is_null_object() ||
319 x->should_profile()); // storing null never needs a type check (unless profiling)
320
321 LIRItem array(x->array(), this);
322 LIRItem index(x->index(), this);
323 LIRItem value(x->value(), this);
324 LIRItem length(this);
325
326 array.load_item();
327 index.load_nonconstant(); // constant indices may fold into the address
328
329 if (use_length && needs_range_check) {
330 length.set_instruction(x->length());
331 length.load_item();
332
333 }
334 if (needs_store_check || x->check_boolean()) {
335 value.load_item(); // value must live in a register for the checks
336 } else {
337 value.load_for_store(x->elt_type());
338 }
339
340 set_no_result(x);
341
342 // the CodeEmitInfo must be duplicated for each different
343 // LIR-instruction because spilling can occur anywhere between two
344 // instructions and so the debug information must be different
345 CodeEmitInfo* range_check_info = state_for(x);
346 CodeEmitInfo* null_check_info = NULL;
347 if (x->needs_null_check()) {
348 null_check_info = new CodeEmitInfo(range_check_info);
349 }
350
351 // emit array address setup early so it schedules better
352 // FIXME? No harm in this on aarch64, and it might help
353 LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
354
355 if (GenerateRangeChecks && needs_range_check) {
356 if (use_length) {
357 __ cmp(lir_cond_belowEqual, length.result(), index.result()); // unsigned compare covers index < 0 too
358 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
359 } else {
360 array_range_check(array.result(), index.result(), null_check_info, range_check_info);
361 // range_check also does the null check
362 null_check_info = NULL;
363 }
364 }
365
366 if (GenerateArrayStoreCheck && needs_store_check) {
367 LIR_Opr tmp1 = new_register(objectType); // scratch registers for the subtype check
368 LIR_Opr tmp2 = new_register(objectType);
369 LIR_Opr tmp3 = new_register(objectType);
370
371 CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
372 __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
373 }
374
375 if (obj_store) {
376 // Needs GC write barriers.
377 pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
378 true /* do_load */, false /* patch */, NULL);
379 __ move(value.result(), array_addr, null_check_info);
380 // Seems to be a precise
381 post_barrier(LIR_OprFact::address(array_addr), value.result());
382 } else {
383 LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info); // T_BOOLEAN stores mask to 0/1
384 __ move(result, array_addr, null_check_info);
385 }
386 }
387
388 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
389 assert(x->is_pinned(),"");
390 LIRItem obj(x->obj(), this);
391 obj.load_item();
392
393 set_no_result(x);
394
395 // "lock" stores the address of the monitor stack slot, so this is not an oop
396 LIR_Opr lock = new_register(T_INT);
397 // Need a scratch register for biased locking
398 LIR_Opr scratch = LIR_OprFact::illegalOpr;
399 if (UseBiasedLocking) {
400 scratch = new_register(T_INT);
401 }
402
403 CodeEmitInfo* info_for_exception = NULL;
404 if (x->needs_null_check()) {
405 info_for_exception = state_for(x);
406 }
|
291 bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
292
293 if (is_power_of_2(c - 1)) {
294 __ shift_left(left, exact_log2(c - 1), tmp);
295 __ add(tmp, left, result);
296 return true;
297 } else if (is_power_of_2(c + 1)) {
298 __ shift_left(left, exact_log2(c + 1), tmp);
299 __ sub(tmp, left, result);
300 return true;
301 } else {
302 return false;
303 }
304 }
305
306 void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
307 BasicType type = item->type();
308 __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
309 }
310
311 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int pro
312 LIR_Opr tmp1 = new_register(objectType);
313 LIR_Opr tmp2 = new_register(objectType);
314 LIR_Opr tmp3 = new_register(objectType);
315 __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
316 }
317
318 //----------------------------------------------------------------------
319 // visitor functions
320 //----------------------------------------------------------------------
321
322 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
323 assert(x->is_pinned(),"");
324 LIRItem obj(x->obj(), this);
325 obj.load_item();
326
327 set_no_result(x);
328
329 // "lock" stores the address of the monitor stack slot, so this is not an oop
330 LIR_Opr lock = new_register(T_INT);
331 // Need a scratch register for biased locking
332 LIR_Opr scratch = LIR_OprFact::illegalOpr;
333 if (UseBiasedLocking) {
334 scratch = new_register(T_INT);
335 }
336
337 CodeEmitInfo* info_for_exception = NULL;
338 if (x->needs_null_check()) {
339 info_for_exception = state_for(x);
340 }
|
753 LIRItem left(x->x(), this);
754 LIRItem right(x->y(), this);
755 ValueTag tag = x->x()->type()->tag();
756 if (tag == longTag) {
757 left.set_destroys_register();
758 }
759 left.load_item();
760 right.load_item();
761 LIR_Opr reg = rlock_result(x);
762
763 if (x->x()->type()->is_float_kind()) {
764 Bytecodes::Code code = x->op();
765 __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
766 } else if (x->x()->type()->tag() == longTag) {
767 __ lcmp2int(left.result(), right.result(), reg);
768 } else {
769 Unimplemented();
770 }
771 }
772
// Generate LIR for Unsafe.compareAndSwap{Object,Int,Long}: computes the
// field address, emits GC barriers for object fields, performs the CAS,
// and converts the machine condition (left in r8 by the cas ops) into a
// 0/1 boolean result.
773 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
774 assert(x->number_of_arguments() == 4, "wrong type");
775 LIRItem obj (x->argument_at(0), this); // object
776 LIRItem offset(x->argument_at(1), this); // offset of field
777 LIRItem cmp (x->argument_at(2), this); // value to compare with field
778 LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp
779
780 assert(obj.type()->tag() == objectTag, "invalid type");
781
782 // In 64bit the type can be long, sparc doesn't have this assert
783 // assert(offset.type()->tag() == intTag, "invalid type");
784
785 assert(cmp.type()->tag() == type->tag(), "invalid type");
786 assert(val.type()->tag() == type->tag(), "invalid type");
787
788 // get address of field
789 obj.load_item();
790 offset.load_nonconstant(); // constant offsets can fold into the address
791 val.load_item();
792 cmp.load_item();
793
794 LIR_Address* a;
795 if(offset.result()->is_constant()) {
796 jlong c = offset.result()->as_jlong();
797 if ((jlong)((jint)c) == c) { // offset fits in 32 bits: fold into displacement
798 a = new LIR_Address(obj.result(),
799 (jint)c,
800 as_BasicType(type));
801 } else { // too large: materialize the offset in a register
802 LIR_Opr tmp = new_register(T_LONG);
803 __ move(offset.result(), tmp);
804 a = new LIR_Address(obj.result(),
805 tmp,
806 as_BasicType(type));
807 }
808 } else {
809 a = new LIR_Address(obj.result(),
810 offset.result(),
811 0,
812 as_BasicType(type));
813 }
814 LIR_Opr addr = new_pointer_register();
815 __ leal(LIR_OprFact::address(a), addr); // CAS needs the flat address in a register
816
817 if (type == objectType) { // Write-barrier needed for Object fields.
818 // Do the pre-write barrier, if any.
819 pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
820 true /* do_load */, false /* patch */, NULL);
821 }
822
823 LIR_Opr result = rlock_result(x);
824
825 LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience
826 if (type == objectType)
827 __ cas_obj(addr, cmp.result(), val.result(), new_register(T_INT), new_register(T_INT),
828 result);
829 else if (type == intType)
830 __ cas_int(addr, cmp.result(), val.result(), ill, ill);
831 else if (type == longType)
832 __ cas_long(addr, cmp.result(), val.result(), ill, ill);
833 else {
834 ShouldNotReachHere();
835 }
836
837 __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result); // r8 holds the raw CAS flag; invert to get 1 == success
838
839 if (type == objectType) { // Write-barrier needed for Object fields.
840 // Seems to be precise
841 post_barrier(addr, val.result());
842 }
843 }
844
845 void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
846 switch (x->id()) {
847 case vmIntrinsics::_dabs:
848 case vmIntrinsics::_dsqrt: {
849 assert(x->number_of_arguments() == 1, "wrong type");
850 LIRItem value(x->argument_at(0), this);
851 value.load_item();
852 LIR_Opr dst = rlock_result(x);
853
854 switch (x->id()) {
855 case vmIntrinsics::_dsqrt: {
856 __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
857 break;
858 }
859 case vmIntrinsics::_dabs: {
860 __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
861 break;
|
687 LIRItem left(x->x(), this);
688 LIRItem right(x->y(), this);
689 ValueTag tag = x->x()->type()->tag();
690 if (tag == longTag) {
691 left.set_destroys_register();
692 }
693 left.load_item();
694 right.load_item();
695 LIR_Opr reg = rlock_result(x);
696
697 if (x->x()->type()->is_float_kind()) {
698 Bytecodes::Code code = x->op();
699 __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
700 } else if (x->x()->type()->tag() == longTag) {
701 __ lcmp2int(left.result(), right.result(), reg);
702 } else {
703 Unimplemented();
704 }
705 }
706
707 LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
708 LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience
709 new_value.load_item();
710 cmp_value.load_item();
711 LIR_Opr result = new_register(T_INT);
712 if (type == T_OBJECT || type == T_ARRAY) {
713 __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
714 } else if (type == T_INT) {
715 __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
716 } else if (type == T_LONG) {
717 __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
718 } else {
719 ShouldNotReachHere();
720 Unimplemented();
721 }
722 __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result);
723 return result;
724 }
725
726 LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
727 bool is_oop = type == T_OBJECT || type == T_ARRAY;
728 LIR_Opr result = new_register(type);
729 value.load_item();
730 assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
731 LIR_Opr tmp = new_register(T_INT);
732 __ xchg(addr, value.result(), result, tmp);
733 return result;
734 }
735
736 LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
737 LIR_Opr result = new_register(type);
738 value.load_item();
739 assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
740 LIR_Opr tmp = new_register(T_INT);
741 __ xadd(addr, value.result(), result, tmp);
742 return result;
743 }
744
745 void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
746 switch (x->id()) {
747 case vmIntrinsics::_dabs:
748 case vmIntrinsics::_dsqrt: {
749 assert(x->number_of_arguments() == 1, "wrong type");
750 LIRItem value(x->argument_at(0), this);
751 value.load_item();
752 LIR_Opr dst = rlock_result(x);
753
754 switch (x->id()) {
755 case vmIntrinsics::_dsqrt: {
756 __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
757 break;
758 }
759 case vmIntrinsics::_dabs: {
760 __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
761 break;
|
1414 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1415
1416 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1417 CodeEmitInfo* info) {
1418 __ volatile_store_mem_reg(value, address, info);
1419 }
1420
// Emit a volatile field load, with a leading membar when barrier-based
// volatiles are disabled so C1 loads compose correctly with C2's STLR
// stores (see JDK-8179954 below).
1421 void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
1422 CodeEmitInfo* info) {
1423 // 8179954: We need to make sure that the code generated for
1424 // volatile accesses forms a sequentially-consistent set of
1425 // operations when combined with STLR and LDAR. Without a leading
1426 // membar it's possible for a simple Dekker test to fail if loads
1427 // use LD;DMB but stores use STLR. This can happen if C2 compiles
1428 // the stores in one method and C1 compiles the loads in another.
1429 if (! UseBarriersForVolatile) {
1430 __ membar(); // full barrier before the load (see comment above)
1431 }
1432
1433 __ volatile_load_mem_reg(address, result, info);
1434 }
1435
1436 void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
1437 BasicType type, bool is_volatile) {
1438 LIR_Address* addr = new LIR_Address(src, offset, type);
1439 __ load(addr, dst);
1440 }
1441
1442
// Store `data` of `type` at src + offset (Unsafe access), wrapping
// object stores in the GC pre/post write barriers.
// NOTE(review): is_volatile is not consulted here — presumably ordering
// is handled by the caller; confirm.
1443 void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
1444 BasicType type, bool is_volatile) {
1445 LIR_Address* addr = new LIR_Address(src, offset, type);
1446 bool is_obj = (type == T_ARRAY || type == T_OBJECT);
1447 if (is_obj) {
1448 // Do the pre-write barrier, if any.
1449 pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
1450 true /* do_load */, false /* patch */, NULL);
1451 __ move(data, addr);
1452 assert(src->is_register(), "must be register");
1453 // Seems to be a precise address
1454 post_barrier(LIR_OprFact::address(addr), data);
1455 } else {
1456 __ move(data, addr); // primitive store: no barriers needed
1457 }
1458 }
1459
// Generate LIR for Unsafe.getAndSet/getAndAdd: atomically swaps (xchg)
// or adds (xadd) `value` at object + offset, returning the old value.
// Object swaps are bracketed by GC pre/post write barriers.
1460 void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
1461 BasicType type = x->basic_type();
1462 LIRItem src(x->object(), this);
1463 LIRItem off(x->offset(), this);
1464 LIRItem value(x->value(), this);
1465
1466 src.load_item();
1467 off.load_nonconstant();
1468
1469 // We can cope with a constant increment in an xadd
1470 if (! (x->is_add()
1471 && value.is_constant()
1472 && can_inline_as_constant(x->value()))) {
1473 value.load_item();
1474 }
1475
1476 LIR_Opr dst = rlock_result(x, type);
1477 LIR_Opr data = value.result();
1478 bool is_obj = (type == T_ARRAY || type == T_OBJECT);
1479 LIR_Opr offset = off.result();
1480
1481 if (data == dst) {
1482 LIR_Opr tmp = new_register(data->type()); // input must survive: copy it so dst can be clobbered
1483 __ move(data, tmp);
1484 data = tmp;
1485 }
1486
1487 LIR_Address* addr;
1488 if (offset->is_constant()) {
1489 jlong l = offset->as_jlong();
1490 assert((jlong)((jint)l) == l, "offset too large for constant");
1491 jint c = (jint)l;
1492 addr = new LIR_Address(src.result(), c, type); // fold small constant offset into displacement
1493 } else {
1494 addr = new LIR_Address(src.result(), offset, type);
1495 }
1496
1497 LIR_Opr tmp = new_register(T_INT); // scratch for the atomic sequence
1498 LIR_Opr ptr = LIR_OprFact::illegalOpr;
1499
1500 if (x->is_add()) {
1501 __ xadd(LIR_OprFact::address(addr), data, dst, tmp);
1502 } else {
1503 if (is_obj) {
1504 // Do the pre-write barrier, if any.
1505 ptr = new_pointer_register();
1506 __ add(src.result(), off.result(), ptr); // flat address for the barriers
1507 pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
1508 true /* do_load */, false /* patch */, NULL);
1509 }
1510 __ xchg(LIR_OprFact::address(addr), data, dst, tmp);
1511 if (is_obj) {
1512 post_barrier(ptr, data);
1513 }
1514 }
1515 }
|
1314 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1315
1316 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1317 CodeEmitInfo* info) {
1318 __ volatile_store_mem_reg(value, address, info);
1319 }
1320
// Load from a volatile field, emitting a leading membar when
// barrier-based volatiles are disabled so C1-compiled loads interoperate
// with C2's STLR-based stores (JDK-8179954, explained below).
1321 void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
1322 CodeEmitInfo* info) {
1323 // 8179954: We need to make sure that the code generated for
1324 // volatile accesses forms a sequentially-consistent set of
1325 // operations when combined with STLR and LDAR. Without a leading
1326 // membar it's possible for a simple Dekker test to fail if loads
1327 // use LD;DMB but stores use STLR. This can happen if C2 compiles
1328 // the stores in one method and C1 compiles the loads in another.
1329 if (! UseBarriersForVolatile) {
1330 __ membar(); // full barrier before the load (see comment above)
1331 }
1332
1333 __ volatile_load_mem_reg(address, result, info);
1334 }
|