110 // Safepoint check
111 InlinedAddress safepoint_counter_addr(SafepointSynchronize::safepoint_counter_addr());
112 Label slow_case;
113 __ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
114
115 #ifndef AARCH64
116 __ push(RegisterSet(R0, R3)); // save incoming arguments for slow case
117 #endif // !AARCH64
118
119 __ ldr_s32(Rsafept_cnt, Address(Rsafepoint_counter_addr));
120 __ tbnz(Rsafept_cnt, 0, slow_case);
121
122 #ifdef AARCH64
123 // If the mask changes, we need to ensure that the inverse is still encodable as an immediate
124 STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
125 __ andr(R1, R1, ~JNIHandles::weak_tag_mask);
126 #else
127 __ bic(R1, R1, JNIHandles::weak_tag_mask);
128 #endif
129
130 if (os::is_MP()) {
131 // Address dependency restricts memory access ordering. It's cheaper than an explicit LoadLoad barrier
132 __ andr(Rtmp1, Rsafept_cnt, (unsigned)1);
133 __ ldr(Robj, Address(R1, Rtmp1));
134 } else {
135 __ ldr(Robj, Address(R1));
136 }
137
138 #ifdef AARCH64
139 __ add(Robj, Robj, AsmOperand(R2, lsr, 2));
140 Address field_addr = Address(Robj);
141 #else
142 Address field_addr;
143 if (type != T_BOOLEAN
144 && type != T_INT
145 #ifndef __ABI_HARD__
146 && type != T_FLOAT
147 #endif // !__ABI_HARD__
148 ) {
149 // Only ldr and ldrb support an embedded shift; other loads do not
150 __ add(Robj, Robj, AsmOperand(R2, lsr, 2));
151 field_addr = Address(Robj);
152 } else {
153 field_addr = Address(Robj, R2, lsr, 2);
154 }
155 #endif // AARCH64
156 assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
181 #endif
182 #ifdef AARCH64
183 __ ldr(Rres, field_addr);
184 #else
185 // Safe to use ldrd since long and double fields are 8-byte aligned
186 __ ldrd(Rres, field_addr);
187 #endif // AARCH64
188 break;
189 #ifdef __ABI_HARD__
190 case T_FLOAT:
191 __ ldr_float(S0, field_addr);
192 break;
193 case T_DOUBLE:
194 __ ldr_double(D0, field_addr);
195 break;
196 #endif // __ABI_HARD__
197 default:
198 ShouldNotReachHere();
199 }
200
201 if (os::is_MP()) {
202 // Address dependency restricts memory access ordering. It's cheaper than an explicit LoadLoad barrier
203 #if defined(__ABI_HARD__) && !defined(AARCH64)
204 if (type == T_FLOAT || type == T_DOUBLE) {
205 __ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
206 __ fmrrd(Rres, Rres_hi, D0);
207 __ eor(Rtmp2, Rres, Rres);
208 __ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr, Rtmp2));
209 } else
210 #endif // __ABI_HARD__ && !AARCH64
211 {
212 #ifndef AARCH64
213 __ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
214 #endif // !AARCH64
215 __ eor(Rtmp2, Rres, Rres);
216 __ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr, Rtmp2));
217 }
218 } else {
219 __ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr));
220 }
221 __ cmp(Rsafept_cnt2, Rsafept_cnt);
222 #ifdef AARCH64
223 __ b(slow_case, ne);
224 __ mov(R0, Rres);
225 __ ret();
226 #else
227 // discards saved R0 R1 R2 R3
228 __ add(SP, SP, 4 * wordSize, eq);
229 __ bx(LR, eq);
230 #endif // AARCH64
231
232 slowcase_entry_pclist[count++] = __ pc();
233
234 __ bind(slow_case);
235 #ifndef AARCH64
236 __ pop(RegisterSet(R0, R3));
237 #endif // !AARCH64
238 // Thumb mode switch handled by MacroAssembler::jump if needed
239 __ jump(slow_case_addr, relocInfo::none, Rtemp);
240
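For reference, the fast path generated above follows a simple speculative protocol: read the safepoint counter, fall back to the slow case if it is odd (a safepoint is in progress), load the field, then re-read the counter and return the speculatively loaded value only if the counter has not changed. A minimal C++ sketch of that protocol, assuming a hypothetical safepoint_counter word and hypothetical read_field_raw()/slow_path() helpers (illustrative names, not HotSpot API):

// Sketch only: safepoint_counter, read_field_raw() and slow_path() are
// hypothetical placeholders for the real VM state and fallbacks.
extern volatile int safepoint_counter;
int read_field_raw(void* handle, long field_offset);
int slow_path(void* handle, long field_offset);

int fast_get_field(void* handle, long field_offset) {
  int before = safepoint_counter;
  if (before & 1) {                                  // odd counter: safepoint in progress
    return slow_path(handle, field_offset);
  }
  int value = read_field_raw(handle, field_offset);  // speculative field load
  int after = safepoint_counter;                     // re-read, ordered after the load above
  if (after != before) {                             // a safepoint ran in between,
    return slow_path(handle, field_offset);          //   so the loaded value may be stale
  }
  return value;
}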
110 // Safepoint check
111 InlinedAddress safepoint_counter_addr(SafepointSynchronize::safepoint_counter_addr());
112 Label slow_case;
113 __ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
114
115 #ifndef AARCH64
116 __ push(RegisterSet(R0, R3)); // save incoming arguments for slow case
117 #endif // !AARCH64
118
119 __ ldr_s32(Rsafept_cnt, Address(Rsafepoint_counter_addr));
120 __ tbnz(Rsafept_cnt, 0, slow_case);
121
122 #ifdef AARCH64
123 // If the mask changes, we need to ensure that the inverse is still encodable as an immediate
124 STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
125 __ andr(R1, R1, ~JNIHandles::weak_tag_mask);
126 #else
127 __ bic(R1, R1, JNIHandles::weak_tag_mask);
128 #endif
129
130 // Address dependency restricts memory access ordering. It's cheaper than an explicit LoadLoad barrier
131 __ andr(Rtmp1, Rsafept_cnt, (unsigned)1);
132 __ ldr(Robj, Address(R1, Rtmp1));
133
134 #ifdef AARCH64
135 __ add(Robj, Robj, AsmOperand(R2, lsr, 2));
136 Address field_addr = Address(Robj);
137 #else
138 Address field_addr;
139 if (type != T_BOOLEAN
140 && type != T_INT
141 #ifndef __ABI_HARD__
142 && type != T_FLOAT
143 #endif // !__ABI_HARD__
144 ) {
145 // Only ldr and ldrb support an embedded shift; other loads do not
146 __ add(Robj, Robj, AsmOperand(R2, lsr, 2));
147 field_addr = Address(Robj);
148 } else {
149 field_addr = Address(Robj, R2, lsr, 2);
150 }
151 #endif // AARCH64
152 assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
177 #endif
178 #ifdef AARCH64
179 __ ldr(Rres, field_addr);
180 #else
181 // Safe to use ldrd since long and double fields are 8-byte aligned
182 __ ldrd(Rres, field_addr);
183 #endif // AARCH64
184 break;
185 #ifdef __ABI_HARD__
186 case T_FLOAT:
187 __ ldr_float(S0, field_addr);
188 break;
189 case T_DOUBLE:
190 __ ldr_double(D0, field_addr);
191 break;
192 #endif // __ABI_HARD__
193 default:
194 ShouldNotReachHere();
195 }
196
197 // Address dependency restricts memory access ordering. It's cheaper than an explicit LoadLoad barrier
198 #if defined(__ABI_HARD__) && !defined(AARCH64)
199 if (type == T_FLOAT || type == T_DOUBLE) {
200 __ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
201 __ fmrrd(Rres, Rres_hi, D0);
202 __ eor(Rtmp2, Rres, Rres);
203 __ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr, Rtmp2));
204 } else
205 #endif // __ABI_HARD__ && !AARCH64
206 {
207 #ifndef AARCH64
208 __ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
209 #endif // !AARCH64
210 __ eor(Rtmp2, Rres, Rres);
211 __ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr, Rtmp2));
212 }
213
214 __ cmp(Rsafept_cnt2, Rsafept_cnt);
215 #ifdef AARCH64
216 __ b(slow_case, ne);
217 __ mov(R0, Rres);
218 __ ret();
219 #else
220 // discards saved R0 R1 R2 R3
221 __ add(SP, SP, 4 * wordSize, eq);
222 __ bx(LR, eq);
223 #endif // AARCH64
224
225 slowcase_entry_pclist[count++] = __ pc();
226
227 __ bind(slow_case);
228 #ifndef AARCH64
229 __ pop(RegisterSet(R0, R3));
230 #endif // !AARCH64
231 // Thumb mode switch handled by MacroAssembler::jump if needed
232 __ jump(slow_case_addr, relocInfo::none, Rtemp);
233
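The andr/eor instructions that feed a zero index register into the loads that follow them exist only to create an address dependency: the address of the second load is made data-dependent on the value produced by the first (Rsafept_cnt & 1 before the object load, Rres ^ Rres before the counter re-read, both provably zero on the fast path), which forces the two loads to be observed in order without an explicit LoadLoad barrier. The generic shape of the pattern, with placeholder register names (Rfirst, Rdep, Rsecond, Rbase1, Rbase2 are illustrative only, not registers used by this stub):

// Illustrative pattern, not code from this stub; register names are placeholders.
__ ldr(Rfirst, Address(Rbase1));         // first load
__ eor(Rdep, Rfirst, Rfirst);            // Rdep = Rfirst ^ Rfirst = 0, but data-dependent on Rfirst
__ ldr(Rsecond, Address(Rbase2, Rdep));  // second load: its address depends on Rfirst,
                                         // so it cannot be reordered ahead of the first load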