7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.inline.hpp"
27 #include "gc/g1/g1BarrierSet.hpp"
28 #include "gc/g1/g1CardTable.hpp"
29 #include "gc/g1/g1BarrierSetAssembler.hpp"
30 #include "gc/g1/g1ThreadLocalData.hpp"
31 #include "gc/g1/heapRegion.hpp"
32 #include "interpreter/interp_masm.hpp"
33 #include "runtime/sharedRuntime.hpp"
34
35 #define __ masm->
36
37 void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
38 Register from, Register to, Register count,
39 Register preserve1, Register preserve2) {
40 bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
41 // With G1, don't generate the call if we statically know that the target is uninitialized
42 if (!dest_uninitialized) {
43 int spill_slots = 3;
44 if (preserve1 != noreg) { spill_slots++; }
45 if (preserve2 != noreg) { spill_slots++; }
46 const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
47 Label filtered;
48
|
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.inline.hpp"
27 #include "c1/c1_LIRAssembler.hpp"
28 #include "c1/c1_MacroAssembler.hpp"
29 #include "gc/g1/c1/g1BarrierSetC1.hpp"
30 #include "gc/g1/g1BarrierSet.hpp"
31 #include "gc/g1/g1BarrierSetAssembler.hpp"
32 #include "gc/g1/g1CardTable.hpp"
33 #include "gc/g1/g1ThreadLocalData.hpp"
34 #include "gc/g1/heapRegion.hpp"
35 #include "interpreter/interp_masm.hpp"
36 #include "runtime/sharedRuntime.hpp"
37
38 #define __ masm->
39
40 void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
41 Register from, Register to, Register count,
42 Register preserve1, Register preserve2) {
43 bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
44 // With G1, don't generate the call if we statically know that the target is uninitialized
45 if (!dest_uninitialized) {
46 int spill_slots = 3;
47 if (preserve1 != noreg) { spill_slots++; }
48 if (preserve2 != noreg) { spill_slots++; }
49 const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
50 Label filtered;
51
|
319 __ bind(done);
320 }
321
// Resolve the jobject (JNI handle) in 'value' to the oop it refers to.
// A NULL handle passes through unchanged. For a jweak-tagged handle the
// resolved oop is additionally fed to the G1 SATB pre-barrier as a phantom
// reference so a concurrently collected referent is kept alive for the
// caller. Clobbers tmp1 and tmp2; 'value' holds the oop on exit.
322 void G1BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value, Register tmp1, Register tmp2, bool needs_frame) {
323 Label done, not_weak;
324 __ cmpdi(CCR0, value, 0);
325 __ beq(CCR0, done); // Use NULL as-is.
326
327 __ clrrdi(tmp1, value, JNIHandles::weak_tag_size); // tmp1 := handle with low tag bits cleared.
328 __ andi_(tmp2, value, JNIHandles::weak_tag_mask); // Record form: sets CCR0 (EQ <=> not weak).
329 __ ld(value, 0, tmp1); // Resolve (untagged) jobject.
330
331 __ beq(CCR0, not_weak); // Test for jweak tag.
332 __ verify_oop(value);
// Weak handle: run the SATB pre-barrier on the referent so it stays live.
333 g1_write_barrier_pre(masm, IN_ROOT | ON_PHANTOM_OOP_REF,
334 noreg, noreg, value,
335 tmp1, tmp2, needs_frame);
336 __ bind(not_weak);
337 __ verify_oop(value);
338 __ bind(done);
339 }
340
341 #undef __
|
322 __ bind(done);
323 }
324
// Resolve the jobject (JNI handle) in 'value' to the oop it refers to.
// A NULL handle passes through unchanged. For a jweak-tagged handle the
// resolved oop is additionally fed to the G1 SATB pre-barrier as a phantom
// reference so a concurrently collected referent is kept alive for the
// caller. Clobbers tmp1 and tmp2; 'value' holds the oop on exit.
325 void G1BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value, Register tmp1, Register tmp2, bool needs_frame) {
326 Label done, not_weak;
327 __ cmpdi(CCR0, value, 0);
328 __ beq(CCR0, done); // Use NULL as-is.
329
330 __ clrrdi(tmp1, value, JNIHandles::weak_tag_size); // tmp1 := handle with low tag bits cleared.
331 __ andi_(tmp2, value, JNIHandles::weak_tag_mask); // Record form: sets CCR0 (EQ <=> not weak).
332 __ ld(value, 0, tmp1); // Resolve (untagged) jobject.
333
334 __ beq(CCR0, not_weak); // Test for jweak tag.
335 __ verify_oop(value);
// Weak handle: run the SATB pre-barrier on the referent so it stays live.
336 g1_write_barrier_pre(masm, IN_ROOT | ON_PHANTOM_OOP_REF,
337 noreg, noreg, value,
338 tmp1, tmp2, needs_frame);
339 __ bind(not_weak);
340 __ verify_oop(value);
341 __ bind(done);
342 }
343
344 #undef __
345 #define __ ce->masm()->
346
// C1 slow-path stub for the G1 SATB pre-barrier. Reached only while
// concurrent marking is active. Optionally loads the previous field value,
// filters out a NULL pre_val, and otherwise tail-calls the shared C1
// runtime blob (address materialized TOC-relative) with pre_val passed on
// the stack at -8(R1_SP).
347 void G1BarrierSetAssembler::gen_g1_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
348 G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
349 // At this point we know that marking is in progress.
350 // If do_load() is true then we have to emit the
351 // load of the previous value; otherwise it has already
352 // been loaded into _pre_val.
353
354 __ bind(*stub->entry());
355
356 assert(stub->pre_val()->is_register(), "Precondition.");
357 Register pre_val_reg = stub->pre_val()->as_register();
358
359 if (stub->do_load()) {
360 ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
361 }
362
// A NULL previous value needs no SATB enqueue: fall through to continuation.
363 __ cmpdi(CCR0, pre_val_reg, 0);
364 __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());
365
// Compute the runtime stub address relative to the global TOC (cheaper than
// materializing the full 64-bit constant) and call it via CTR.
366 address c_code = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
367 //__ load_const_optimized(R0, c_code);
368 __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(c_code));
369 __ std(pre_val_reg, -8, R1_SP); // Pass pre_val on stack.
370 __ mtctr(R0);
371 __ bctrl();
372 __ b(*stub->continuation());
373 }
374
// C1 slow-path stub for the G1 post-barrier (card marking / remembered-set
// update). Filters out a NULL new value, then tail-calls the shared C1
// runtime blob (address materialized TOC-relative) with the store address
// passed in R0.
375 void G1BarrierSetAssembler::gen_g1_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
376 G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
377 __ bind(*stub->entry());
378
379 assert(stub->addr()->is_register(), "Precondition.");
380 assert(stub->new_val()->is_register(), "Precondition.");
381 Register addr_reg = stub->addr()->as_pointer_register();
382 Register new_val_reg = stub->new_val()->as_register();
383
// Storing NULL never creates a cross-region reference: skip the barrier.
384 __ cmpdi(CCR0, new_val_reg, 0);
385 __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());
386
// Compute the runtime stub address relative to the global TOC and call it
// via CTR. R0 is loaded with addr_reg only after mtctr, since both uses
// share R0 as scratch.
387 address c_code = bs->post_barrier_c1_runtime_code_blob()->code_begin();
388 //__ load_const_optimized(R0, c_code);
389 __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(c_code));
390 __ mtctr(R0);
391 __ mr(R0, addr_reg); // Pass addr in R0.
392 __ bctrl();
393 __ b(*stub->continuation());
394 }
395
396 #undef __
397 #define __ sasm->
398
// Shared C1 runtime stub for the G1 SATB pre-barrier slow path.
// Called as a leaf with the previous oop value pre-pushed at -8(R1_SP)
// (see gen_g1_pre_barrier_stub). Enqueues pre_val into the thread-local
// SATB mark queue; if the queue is full, saves all volatile registers and
// calls into the VM to install a fresh buffer, then retries.
// Stack slots used below R1_SP: -8 pre_val (caller), -16 tmp, -24 tmp2.
399 void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
400 BarrierSet* bs = BarrierSet::barrier_set();
401
402 __ set_info("g1_pre_barrier_slow_id", false);
403
404 // Using stack slots: pre_val (pre-pushed), spill tmp, spill tmp2.
405 const int stack_slots = 3;
406 Register pre_val = R0; // previous value of memory
407 Register tmp = R14;
408 Register tmp2 = R15;
409
410 Label refill, restart, marking_not_active;
411 int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
412 int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
413 int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
414
415 // Spill
416 __ std(tmp, -16, R1_SP);
417 __ std(tmp2, -24, R1_SP);
418
// Is marking still active? The active flag may be 4 or 1 bytes wide
// depending on the build; use the matching load width.
419 // Is marking still active?
420 if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
421 __ lwz(tmp, satb_q_active_byte_offset, R16_thread);
422 } else {
423 assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
424 __ lbz(tmp, satb_q_active_byte_offset, R16_thread);
425 }
426 __ cmpdi(CCR0, tmp, 0);
427 __ beq(CCR0, marking_not_active);
428
428 __ bind(restart);
430 // Load the index into the SATB buffer. SATBMarkQueue::_index is a
431 // size_t so ld_ptr is appropriate.
432 __ ld(tmp, satb_q_index_byte_offset, R16_thread);
433
434 // index == 0?
435 __ cmpdi(CCR0, tmp, 0);
436 __ beq(CCR0, refill);
437
// Queue has room: the index counts down, so decrement it by one oop slot
// and store pre_val at buf + new index.
438 __ ld(tmp2, satb_q_buf_byte_offset, R16_thread);
439 __ ld(pre_val, -8, R1_SP); // Load from stack.
440 __ addi(tmp, tmp, -oopSize);
441
442 __ std(tmp, satb_q_index_byte_offset, R16_thread);
443 __ stdx(pre_val, tmp2, tmp); // [_buf + index] := pre_val (the previous oop)
444
445 __ bind(marking_not_active);
446 // Restore temp registers and return-from-leaf.
447 __ ld(tmp2, -24, R1_SP);
448 __ ld(tmp, -16, R1_SP);
449 __ blr();
450
// Slow-slow path: the SATB buffer is full. Preserve all volatile registers
// and LR, make a proper C-call frame, and let the VM hand out a new buffer,
// then retry the enqueue.
451 __ bind(refill);
452 const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
453 __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
454 __ mflr(R0);
455 __ std(R0, _abi(lr), R1_SP);
456 __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
457 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread), R16_thread);
458 __ pop_frame();
459 __ ld(R0, _abi(lr), R1_SP);
460 __ mtlr(R0);
461 __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
462 __ b(restart);
463 }
464
// Shared C1 runtime stub for the G1 post-barrier slow path.
// Called as a leaf with the store address passed in R0 (see
// gen_g1_post_barrier_stub). Computes the card for the address, filters
// young and already-dirty cards, dirties the card, and enqueues its address
// into the thread-local dirty card queue; if the queue is full, saves all
// volatile registers and calls into the VM for a fresh buffer, then retries.
// Stack slots used below R1_SP: -8 addr, -16 tmp2.
465 void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
466 G1BarrierSet* bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
467
468 __ set_info("g1_post_barrier_slow_id", false);
469
470 // Using stack slots: spill addr, spill tmp2
471 const int stack_slots = 2;
472 Register tmp = R0;
473 Register addr = R14;
474 Register tmp2 = R15;
475 jbyte* byte_map_base = bs->card_table()->byte_map_base();
476
477 Label restart, refill, ret;
478
479 // Spill
480 __ std(addr, -8, R1_SP);
481 __ std(tmp2, -16, R1_SP);
482
// addr := &card_table[store_address >> card_shift], i.e. the address of the
// card byte covering the stored-to location.
483 __ srdi(addr, R0, CardTable::card_shift); // Addr is passed in R0.
484 __ load_const_optimized(/*cardtable*/ tmp2, byte_map_base, tmp);
485 __ add(addr, tmp2, addr);
486 __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
487
488 // Return if young card.
489 __ cmpwi(CCR0, tmp, G1CardTable::g1_young_card_val());
490 __ beq(CCR0, ret);
491
// Re-read the card after a StoreLoad fence so a concurrent dirtying by
// another thread is observed and the card is not enqueued twice.
492 // Return if sequential consistent value is already dirty.
493 __ membar(Assembler::StoreLoad);
494 __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
495
496 __ cmpwi(CCR0, tmp, G1CardTable::dirty_card_val());
497 __ beq(CCR0, ret);
498
499 // Not dirty.
500
501 // First, dirty it.
502 __ li(tmp, G1CardTable::dirty_card_val());
503 __ stb(tmp, 0, addr);
504
505 int dirty_card_q_index_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
506 int dirty_card_q_buf_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());
507
508 __ bind(restart);
509
510 // Get the index into the update buffer. DirtyCardQueue::_index is
511 // a size_t so ld_ptr is appropriate here.
512 __ ld(tmp2, dirty_card_q_index_byte_offset, R16_thread);
513
514 // index == 0?
515 __ cmpdi(CCR0, tmp2, 0);
516 __ beq(CCR0, refill);
517
// Queue has room: the index counts down, so decrement it by one slot and
// store the card address at buf + new index.
518 __ ld(tmp, dirty_card_q_buf_byte_offset, R16_thread);
519 __ addi(tmp2, tmp2, -oopSize);
520
521 __ std(tmp2, dirty_card_q_index_byte_offset, R16_thread);
522 __ add(tmp2, tmp, tmp2);
523 __ std(addr, 0, tmp2); // [_buf + index] := <address_of_card>
524
525 // Restore temp registers and return-from-leaf.
526 __ bind(ret);
527 __ ld(tmp2, -16, R1_SP);
528 __ ld(addr, -8, R1_SP);
529 __ blr();
530
// Slow-slow path: the dirty card queue is full. Preserve all volatile
// registers and LR, make a proper C-call frame, and let the VM hand out a
// new buffer, then retry the enqueue.
531 __ bind(refill);
532 const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
533 __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
534 __ mflr(R0);
535 __ std(R0, _abi(lr), R1_SP);
536 __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
537 __ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread), R16_thread);
538 __ pop_frame();
539 __ ld(R0, _abi(lr), R1_SP);
540 __ mtlr(R0);
541 __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
542 __ b(restart);
543 }
544
545 #undef __
|