1 /*
2 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
2193
2194 return null_check_offset;
2195 }
2196
2197
// Emits the biased-locking fast unlock path: if the mark word of the object in
// obj_reg carries the biased-lock bit pattern, unlocking is a no-op and control
// branches to 'done'. Otherwise execution falls through to the caller's normal
// unlock sequence. Clobbers tmp_reg.
void MacroAssembler::biased_locking_exit(Register obj_reg, Register tmp_reg, Label& done) {
  assert(UseBiasedLocking, "why call this otherwise?");

  // Check for biased locking unlock case, which is a no-op
  // Note: we do not have to check the thread ID for two reasons.
  // First, the interpreter checks for IllegalMonitorStateException at
  // a higher level. Second, if the bias was revoked while we held the
  // lock, the object could not be rebiased toward another thread, so
  // the bias bit would be clear.
  ldr(tmp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));

  // Isolate the lock-pattern bits of the mark word and compare against the
  // biased-lock pattern; branch to 'done' on match (nothing to release).
  andr(tmp_reg, tmp_reg, (uintx)markOopDesc::biased_lock_mask_in_place);
  cmp(tmp_reg, markOopDesc::biased_lock_pattern);
  b(done, eq);
}
2213
2214 #ifdef AARCH64
2215
2216 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) {
2217 switch (size_in_bytes) {
2218 case 8: ldr(dst, src); break;
2219 case 4: is_signed ? ldr_s32(dst, src) : ldr_u32(dst, src); break;
2220 case 2: is_signed ? ldrsh(dst, src) : ldrh(dst, src); break;
2221 case 1: is_signed ? ldrsb(dst, src) : ldrb(dst, src); break;
2222 default: ShouldNotReachHere();
2223 }
2224 }
2225
2226 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
2227 switch (size_in_bytes) {
2228 case 8: str(src, dst); break;
2229 case 4: str_32(src, dst); break;
2230 case 2: strh(src, dst); break;
2231 case 1: strb(src, dst); break;
2232 default: ShouldNotReachHere();
|
1 /*
2 * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
2193
2194 return null_check_offset;
2195 }
2196
2197
// Emits the biased-locking fast unlock path: if the mark word of the object in
// obj_reg carries the biased-lock bit pattern, unlocking is a no-op and control
// branches to 'done'. Otherwise execution falls through to the caller's normal
// unlock sequence. Clobbers tmp_reg.
void MacroAssembler::biased_locking_exit(Register obj_reg, Register tmp_reg, Label& done) {
  assert(UseBiasedLocking, "why call this otherwise?");

  // Check for biased locking unlock case, which is a no-op
  // Note: we do not have to check the thread ID for two reasons.
  // First, the interpreter checks for IllegalMonitorStateException at
  // a higher level. Second, if the bias was revoked while we held the
  // lock, the object could not be rebiased toward another thread, so
  // the bias bit would be clear.
  ldr(tmp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));

  // Isolate the lock-pattern bits of the mark word and compare against the
  // biased-lock pattern; branch to 'done' on match (nothing to release).
  andr(tmp_reg, tmp_reg, (uintx)markOopDesc::biased_lock_mask_in_place);
  cmp(tmp_reg, markOopDesc::biased_lock_pattern);
  b(done, eq);
}
2213
2214
// Emit code that decodes a jobject/jweak handle held in 'value' into the oop
// it refers to, in place. A NULL handle is passed through unchanged. Handles
// tagged in their low bit are jweaks: the tag is stripped while loading, and
// the loaded referent is fed to the G1 pre-barrier (when G1 is in use) so that
// a concurrently running marking cycle does not lose the object.
// Clobbers tmp1 and tmp2.
void MacroAssembler::resolve_jobject(Register value,
                                     Register tmp1,
                                     Register tmp2) {
  assert_different_registers(value, tmp1, tmp2);
  Label done, not_weak;
  cbz(value, done);             // Use NULL as-is.
  STATIC_ASSERT(JNIHandles::weak_tag_mask == 1u);
  tbz(value, 0, not_weak);      // Test for jweak tag.
  // Resolve jweak: the negative displacement cancels the low-bit tag, so this
  // loads from the untagged handle address.
  ldr(value, Address(value, -JNIHandles::weak_tag_value));
  verify_oop(value);
#if INCLUDE_ALL_GCS
  if (UseG1GC) {
    // Keep the referent alive for concurrent marking (SATB pre-barrier on the
    // value just loaded; no store address or new value involved).
    g1_write_barrier_pre(noreg, // store_addr
                         noreg, // new_val
                         value, // pre_val
                         tmp1,  // tmp1
                         tmp2); // tmp2
  }
#endif // INCLUDE_ALL_GCS
  b(done);
  bind(not_weak);
  // Resolve (untagged) jobject: plain load of the referent.
  ldr(value, Address(value));
  verify_oop(value);
  bind(done);
}
2242
2243
2244 //////////////////////////////////////////////////////////////////////////////////
2245
2246 #if INCLUDE_ALL_GCS
2247
// G1 pre-barrier (SATB — snapshot-at-the-beginning).
// Records the previous value of a reference field in the thread-local SATB
// mark queue while concurrent marking is active; falls into the runtime when
// the queue buffer is full.
// Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
// If store_addr != noreg, then previous value is loaded from [store_addr];
// in such case store_addr and new_val registers are preserved;
// otherwise pre_val register is preserved.
void MacroAssembler::g1_write_barrier_pre(Register store_addr,
                                          Register new_val,
                                          Register pre_val,
                                          Register tmp1,
                                          Register tmp2) {
  Label done;
  Label runtime;

  if (store_addr != noreg) {
    assert_different_registers(store_addr, new_val, pre_val, tmp1, tmp2, noreg);
  } else {
    assert (new_val == noreg, "should be");
    assert_different_registers(pre_val, tmp1, tmp2, noreg);
  }

  // Per-thread SATB mark queue fields: active flag, fill index, buffer base.
  Address in_progress(Rthread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                        SATBMarkQueue::byte_offset_of_active()));
  Address index(Rthread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                  SATBMarkQueue::byte_offset_of_index()));
  Address buffer(Rthread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                   SATBMarkQueue::byte_offset_of_buf()));

  // Is marking active? If not, the barrier is a no-op.
  assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "adjust this code");
  ldrb(tmp1, in_progress);
  cbz(tmp1, done);

  // Do we need to load the previous value?
  if (store_addr != noreg) {
    load_heap_oop(pre_val, Address(store_addr, 0));
  }

  // Is the previous value null? Null previous values need not be recorded.
  cbz(pre_val, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  ldr(tmp1, index);           // tmp1 := *index_adr
  ldr(tmp2, buffer);

  // The queue fills downward: index 0 means the buffer is full.
  subs(tmp1, tmp1, wordSize); // tmp1 := tmp1 - wordSize
  b(runtime, lt);             // If negative, goto runtime

  str(tmp1, index);           // *index_adr := tmp1

  // Record the previous value at buffer base + new index.
  str(pre_val, Address(tmp2, tmp1));
  b(done);

  bind(runtime);

  // Slow path: the VM call below blows the volatile registers, so save the
  // live input values around it.
#ifdef AARCH64
  if (store_addr != noreg) {
    raw_push(store_addr, new_val);
  } else {
    // raw_push pushes in pairs; pad with ZR to keep the stack aligned.
    raw_push(pre_val, ZR);
  }
#else
  if (store_addr != noreg) {
    // avoid raw_push to support any ordering of store_addr and new_val
    push(RegisterSet(store_addr) | RegisterSet(new_val));
  } else {
    push(pre_val);
  }
#endif // AARCH64

  // Runtime call: SharedRuntime::g1_wb_pre(pre_val, thread).
  if (pre_val != R0) {
    mov(R0, pre_val);
  }
  mov(R1, Rthread);

  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), R0, R1);

#ifdef AARCH64
  if (store_addr != noreg) {
    raw_pop(store_addr, new_val);
  } else {
    raw_pop(pre_val, ZR);
  }
#else
  if (store_addr != noreg) {
    pop(RegisterSet(store_addr) | RegisterSet(new_val));
  } else {
    pop(pre_val);
  }
#endif // AARCH64

  bind(done);
}
2345
// G1 post-barrier.
// After a reference store, dirties the card covering store_addr and enqueues
// the card in the thread-local dirty card queue, but only for stores that
// cross heap regions, store a non-NULL value, and hit a card that is neither
// young nor already dirty. Falls into the runtime when the queue is full.
// Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
void MacroAssembler::g1_write_barrier_post(Register store_addr,
                                           Register new_val,
                                           Register tmp1,
                                           Register tmp2,
                                           Register tmp3) {

  // Per-thread dirty card queue fields: fill index and buffer base.
  Address queue_index(Rthread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                        DirtyCardQueue::byte_offset_of_index()));
  Address buffer(Rthread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                   DirtyCardQueue::byte_offset_of_buf()));

  BarrierSet* bs = Universe::heap()->barrier_set();
  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  Label done;
  Label runtime;

  // Does store cross heap regions? Same-region stores need no remembered-set
  // update: xor the two addresses and test the bits above the region size.

  eor(tmp1, store_addr, new_val);
#ifdef AARCH64
  logical_shift_right(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes);
  cbz(tmp1, done);
#else
  movs(tmp1, AsmOperand(tmp1, lsr, HeapRegion::LogOfHRGrainBytes));
  b(done, eq);
#endif

  // crosses regions, storing NULL?

  cbz(new_val, done);

  // storing region crossing non-NULL, is card already dirty?
  const Register card_addr = tmp1;
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

  // card_addr := byte_map_base + (store_addr >> card_shift)
  mov_address(tmp2, (address)ct->byte_map_base, symbolic_Relocation::card_table_reference);
  add(card_addr, tmp2, AsmOperand(store_addr, lsr, CardTableModRefBS::card_shift));

  // Young-gen cards never need refinement; skip them without a memory barrier.
  ldrb(tmp2, Address(card_addr));
  cmp(tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val());
  b(done, eq);

  // Order the reference store before the re-read of the card below.
  membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), tmp2);

  assert(CardTableModRefBS::dirty_card_val() == 0, "adjust this code");
  ldrb(tmp2, Address(card_addr));
  cbz(tmp2, done);

  // storing a region crossing, non-NULL oop, card is clean.
  // dirty card and log.

  // dirty_card_val() == 0 (asserted above), so a zero byte marks it dirty.
  strb(zero_register(tmp2), Address(card_addr));

  ldr(tmp2, queue_index);
  ldr(tmp3, buffer);

  // The queue fills downward: index 0 means the buffer is full.
  subs(tmp2, tmp2, wordSize);
  b(runtime, lt); // go to runtime if now negative

  str(tmp2, queue_index);

  // Enqueue the card address at buffer base + new index.
  str(card_addr, Address(tmp3, tmp2));
  b(done);

  bind(runtime);

  // Slow path: SharedRuntime::g1_wb_post(card_addr, thread).
  if (card_addr != R0) {
    mov(R0, card_addr);
  }
  mov(R1, Rthread);
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), R0, R1);

  bind(done);
}
2422
2423 #endif // INCLUDE_ALL_GCS
2424
2425 //////////////////////////////////////////////////////////////////////////////////
2426
2427 #ifdef AARCH64
2428
2429 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) {
2430 switch (size_in_bytes) {
2431 case 8: ldr(dst, src); break;
2432 case 4: is_signed ? ldr_s32(dst, src) : ldr_u32(dst, src); break;
2433 case 2: is_signed ? ldrsh(dst, src) : ldrh(dst, src); break;
2434 case 1: is_signed ? ldrsb(dst, src) : ldrb(dst, src); break;
2435 default: ShouldNotReachHere();
2436 }
2437 }
2438
2439 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
2440 switch (size_in_bytes) {
2441 case 8: str(src, dst); break;
2442 case 4: str_32(src, dst); break;
2443 case 2: strh(src, dst); break;
2444 case 1: strb(src, dst); break;
2445 default: ShouldNotReachHere();
|