/*
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "registerSaver_s390.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#endif

#define __ masm->

#define BLOCK_COMMENT(str) if (PrintAssembly) __ block_comment(str)

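// Arraycopy pre-barrier: before the destination range [addr, addr + count)
// is overwritten, the old oops stored there must be recorded for SATB
// marking. This is only needed while concurrent marking is active and the
// destination may contain previously stored oops, so the call is omitted
// for uninitialized destinations and filtered out at runtime when marking
// is inactive.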
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                            Register addr, Register count) {
  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  // With G1, don't generate the call if we statically know that the target is uninitialized.
  if (!dest_uninitialized) {
    // Is marking active?
    Label filtered;
    assert_different_registers(addr,  Z_R0_scratch);  // would be destroyed by push_frame()
    assert_different_registers(count, Z_R0_scratch);  // would be destroyed by push_frame()
    Register Rtmp1 = Z_R0_scratch;
    const int active_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
    if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
      __ load_and_test_int(Rtmp1, Address(Z_thread, active_offset));
    } else {
      guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
      __ load_and_test_byte(Rtmp1, Address(Z_thread, active_offset));
    }
    __ z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently.

    RegisterSaver::save_live_registers(masm, RegisterSaver::arg_registers); // Creates frame.

    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry), addr, count);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry), addr, count);
    }

    RegisterSaver::restore_live_registers(masm, RegisterSaver::arg_registers);

    __ bind(filtered);
  }
}

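// Arraycopy post-barrier: after the oops have been stored, the covered
// cards must be dirtied and enqueued so that concurrent refinement can
// update the remembered sets. The work is delegated to the shared runtime;
// this code only preserves the argument registers around the call or, for
// the tail-call variant, moves the arguments into place and branches.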
void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                             Register addr, Register count, bool do_return) {
  address entry_point = CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry);
  if (!do_return) {
    assert_different_registers(addr,  Z_R0_scratch);  // would be destroyed by push_frame()
    assert_different_registers(count, Z_R0_scratch);  // would be destroyed by push_frame()
    RegisterSaver::save_live_registers(masm, RegisterSaver::arg_registers); // Creates frame.
    __ call_VM_leaf(entry_point, addr, count);
    RegisterSaver::restore_live_registers(masm, RegisterSaver::arg_registers);
  } else {
    // Tail call: call the C code and return to the stub caller.
    __ lgr_if_needed(Z_ARG1, addr);
    __ lgr_if_needed(Z_ARG2, count);
    __ load_const(Z_R1, entry_point);
    __ z_br(Z_R1); // Branch without linking; the callee will return to the stub caller.
  }
}

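// Loads an oop and, for reference loads tagged ON_WEAK_OOP_REF or
// ON_PHANTOM_OOP_REF (e.g. Reference.get()), applies the SATB pre-barrier
// to the loaded value. Logging the referent keeps it alive for the current
// concurrent marking cycle, which preserves the SATB invariant when a
// reference object's referent is read.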
void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                    const Address& src, Register dst, Register tmp1, Register tmp2, Label *L_handle_null) {
  bool on_oop = type == T_OBJECT || type == T_ARRAY;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool on_reference = on_weak || on_phantom;
  Label done;
  if (on_oop && on_reference && L_handle_null == NULL) { L_handle_null = &done; }
  ModRefBarrierSetAssembler::load_at(masm, decorators, type, src, dst, tmp1, tmp2, L_handle_null);
  if (on_oop && on_reference) {
    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    g1_write_barrier_pre(masm, decorators | IS_NOT_NULL,
                         NULL  /* obj */,
                         dst   /* pre_val */,
                         noreg /* preserve */,
                         tmp1, tmp2 /* tmp */,
                         true  /* pre_val_needed */);
  }
  __ bind(done);
}

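// SATB pre-barrier. G1's concurrent marking relies on a snapshot-at-the-
// beginning invariant: every object reachable when marking started must be
// marked, even if the reference to it is overwritten during marking. To
// that end, the value about to be overwritten (the "previous value") is
// logged into the thread-local SATB buffer. Fast-path filters: marking is
// inactive, or the previous value is NULL. If the buffer is full
// (index == 0), the slow path calls into the runtime to enqueue the value.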
void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, DecoratorSet decorators,
                                                 const Address*  obj,
                                                 Register        Rpre_val,      // Ideally, this is a non-volatile register.
                                                 Register        Rval,          // Will be preserved.
                                                 Register        Rtmp1,         // If Rpre_val is volatile, either Rtmp1
                                                 Register        Rtmp2,         // or Rtmp2 has to be non-volatile.
                                                 bool            pre_val_needed // Save Rpre_val across runtime call, caller uses it.
                                                 ) {

  bool not_null  = (decorators & IS_NOT_NULL) != 0,
       preloaded = obj == NULL;

  const Register Robj = obj ? obj->base() : noreg,
                 Roff = obj ? obj->index() : noreg;
  const int active_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
  const int buffer_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
  const int index_offset  = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
  assert_different_registers(Rtmp1, Rtmp2, Z_R0_scratch); // None of the Rtmp<i> must be Z_R0!!
  assert_different_registers(Robj, Z_R0_scratch);         // Used for addressing. Furthermore, push_frame destroys Z_R0!!
  assert_different_registers(Rval, Z_R0_scratch);         // push_frame destroys Z_R0!!

  Label callRuntime, filtered;

  BLOCK_COMMENT("g1_write_barrier_pre {");

  // Is marking active?
  // Note: value is loaded for test purposes only. No further use here.
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ load_and_test_int(Rtmp1, Address(Z_thread, active_offset));
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ load_and_test_byte(Rtmp1, Address(Z_thread, active_offset));
  }
  __ z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently.

  assert(Rpre_val != noreg, "must have a real register");

  // If an object is given, we need to load the previous value into Rpre_val.
  if (obj) {
    // Load the previous value...
    if (UseCompressedOops) {
      __ z_llgf(Rpre_val, *obj);
    } else {
      __ z_lg(Rpre_val, *obj);
    }
  }

  // Is the previous value NULL?
  // If so, we don't need to record it and we're done.
  // Note: pre_val is loaded, decompressed and stored (directly or via runtime call).
  //       Register contents is preserved across runtime call if caller requests to do so.
  if (preloaded && not_null) {
#ifdef ASSERT
    __ z_ltgr(Rpre_val, Rpre_val);
    __ asm_assert_ne("null oop not allowed (G1 pre)", 0x321); // Checked by caller.
#endif
  } else {
    __ z_ltgr(Rpre_val, Rpre_val);
    __ z_bre(filtered); // previous value is NULL, so we don't need to record it.
  }

  // Decode the oop now. We know it's not NULL.
  if (Robj != noreg && UseCompressedOops) {
    __ oop_decoder(Rpre_val, Rpre_val, /*maybeNULL=*/false);
  }

  // OK, it's not filtered, so we'll need to call enqueue.

  // We can store the original value in the thread's buffer
  // only if index > 0. Otherwise, we need runtime to handle.
  // (The index field is typed as size_t.)
  Register Rbuffer = Rtmp1, Rindex = Rtmp2;
  assert_different_registers(Rbuffer, Rindex, Rpre_val);

  __ z_lg(Rbuffer, buffer_offset, Z_thread);

  __ load_and_test_long(Rindex, Address(Z_thread, index_offset));
  __ z_bre(callRuntime); // If index == 0, goto runtime.

  __ add2reg(Rindex, -wordSize); // Decrement index.
  __ z_stg(Rindex, index_offset, Z_thread);

  // Record the previous value.
  __ z_stg(Rpre_val, 0, Rbuffer, Rindex);
  __ z_bru(filtered);  // We are done.

  Rbuffer = noreg;  // end of life
  Rindex  = noreg;  // end of life

  __ bind(callRuntime);

  // Save some registers (inputs and result) over runtime call
  // by spilling them into the top frame.
  if (Robj != noreg && Robj->is_volatile()) {
    __ z_stg(Robj, Robj->encoding()*BytesPerWord, Z_SP);
  }
  if (Roff != noreg && Roff->is_volatile()) {
    __ z_stg(Roff, Roff->encoding()*BytesPerWord, Z_SP);
  }
  if (Rval != noreg && Rval->is_volatile()) {
    __ z_stg(Rval, Rval->encoding()*BytesPerWord, Z_SP);
  }

  // Save Rpre_val (result) over runtime call.
  Register Rpre_save = Rpre_val;
  if ((Rpre_val == Z_R0_scratch) || (pre_val_needed && Rpre_val->is_volatile())) {
    guarantee(!Rtmp1->is_volatile() || !Rtmp2->is_volatile(), "oops!");
    Rpre_save = !Rtmp1->is_volatile() ? Rtmp1 : Rtmp2;
  }
  __ lgr_if_needed(Rpre_save, Rpre_val);

  // Push frame to protect top frame with return pc and spilled register values.
  __ save_return_pc();
  __ push_frame_abi160(0); // Will use Z_R0 as tmp.

  // Rpre_val may be destroyed by push_frame().
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), Rpre_save, Z_thread);

  __ pop_frame();
  __ restore_return_pc();

  // Restore spilled values.
  if (Robj != noreg && Robj->is_volatile()) {
    __ z_lg(Robj, Robj->encoding()*BytesPerWord, Z_SP);
  }
  if (Roff != noreg && Roff->is_volatile()) {
    __ z_lg(Roff, Roff->encoding()*BytesPerWord, Z_SP);
  }
  if (Rval != noreg && Rval->is_volatile()) {
    __ z_lg(Rval, Rval->encoding()*BytesPerWord, Z_SP);
  }
  if (pre_val_needed && Rpre_val->is_volatile()) {
    __ lgr_if_needed(Rpre_val, Rpre_save);
  }

  __ bind(filtered);
  BLOCK_COMMENT("} g1_write_barrier_pre");
}

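// Card-table post-barrier. After storing Rnew_val at Rstore_addr, the card
// covering the store address must be dirtied and logged so that the
// remembered set of the region containing Rnew_val can be updated by
// concurrent refinement. Fast-path filters (each branches to "filtered"):
//  - the store does not cross a heap region boundary (same-region
//    references need no remembered set entry),
//  - the new value is NULL,
//  - the card is marked young (stores into young regions need no
//    refinement),
//  - the card is already dirty (re-checked after a memory barrier).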
void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, DecoratorSet decorators, Register Rstore_addr, Register Rnew_val,
                                                  Register Rtmp1, Register Rtmp2, Register Rtmp3) {
  bool not_null = (decorators & IS_NOT_NULL) != 0;

  assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2); // Most probably, Rnew_val == Rtmp3.

  Label callRuntime, filtered;

  CardTableBarrierSet* ct = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());

  BLOCK_COMMENT("g1_write_barrier_post {");

  // Does the store cross heap region boundaries?
  // It does if the two addresses specify different grain addresses.
  if (VM_Version::has_DistinctOpnds()) {
    __ z_xgrk(Rtmp1, Rstore_addr, Rnew_val);
  } else {
    __ z_lgr(Rtmp1, Rstore_addr);
    __ z_xgr(Rtmp1, Rnew_val);
  }
  __ z_srag(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes);
  __ z_bre(filtered);

  // Crosses regions, storing NULL?
  if (not_null) {
#ifdef ASSERT
    __ z_ltgr(Rnew_val, Rnew_val);
    __ asm_assert_ne("null oop not allowed (G1 post)", 0x322); // Checked by caller.
#endif
  } else {
    __ z_ltgr(Rnew_val, Rnew_val);
    __ z_bre(filtered);
  }

  Rnew_val = noreg; // end of lifetime

  // Storing region crossing non-NULL, is card already dirty?
  assert_different_registers(Rtmp1, Rtmp2, Rtmp3);
  // Make sure not to use Z_R0 for any of these registers.
  Register Rcard_addr = (Rtmp1 != Z_R0_scratch) ? Rtmp1 : Rtmp3;
  Register Rbase      = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp3;

  // Calculate the address of the card.
  __ load_const_optimized(Rbase, (address)ct->card_table()->byte_map_base());      // Card table base.
  __ z_srlg(Rcard_addr, Rstore_addr, CardTable::card_shift);         // Index into card table.
  __ z_algr(Rcard_addr, Rbase);                                      // Explicit calculation needed for cli.
  Rbase = noreg; // end of lifetime

  // Filter young.
  assert((unsigned int)G1CardTable::g1_young_card_val() <= 255, "otherwise check this code");
  __ z_cli(0, Rcard_addr, G1CardTable::g1_young_card_val());
  __ z_bre(filtered);

  // Check the card value. If dirty, we're done.
  // This also avoids false sharing of the (already dirty) card.
  __ z_sync(); // Required to support concurrent cleaning.
  assert((unsigned int)G1CardTable::dirty_card_val() <= 255, "otherwise check this code");
  __ z_cli(0, Rcard_addr, G1CardTable::dirty_card_val()); // Reload after membar.
  __ z_bre(filtered);

  // Storing a region crossing, non-NULL oop, card is clean.
  // Dirty card and log.
  __ z_mvi(0, Rcard_addr, G1CardTable::dirty_card_val());

  Register Rcard_addr_x = Rcard_addr;
  Register Rqueue_index = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp1;
  Register Rqueue_buf   = (Rtmp3 != Z_R0_scratch) ? Rtmp3 : Rtmp1;
  const int qidx_off    = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
  const int qbuf_off    = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());
  if ((Rcard_addr == Rqueue_buf) || (Rcard_addr == Rqueue_index)) {
    Rcard_addr_x = Z_R0_scratch;  // Register shortage. We have to use Z_R0.
  }
  __ lgr_if_needed(Rcard_addr_x, Rcard_addr);

  __ load_and_test_long(Rqueue_index, Address(Z_thread, qidx_off));
  __ z_bre(callRuntime); // If index == 0, branch to runtime.

  __ z_lg(Rqueue_buf, qbuf_off, Z_thread);

  __ add2reg(Rqueue_index, -wordSize); // Decrement index.
  __ z_stg(Rqueue_index, qidx_off, Z_thread);

  __ z_stg(Rcard_addr_x, 0, Rqueue_index, Rqueue_buf); // Store card.
  __ z_bru(filtered);

  __ bind(callRuntime);

  // TODO: do we need a frame? Introduced to be on the safe side.
  bool needs_frame = true;
  __ lgr_if_needed(Rcard_addr, Rcard_addr_x); // Copy back ASAP. push_frame will destroy Z_R0_scratch!

  // The VM call needs a frame (return pc and the ABI-mandated register save area).
  if (needs_frame) {
    __ save_return_pc();
    __ push_frame_abi160(0); // Will use Z_R0 as tmp on old CPUs.
  }

  // Rcard_addr and Z_thread are the live input values.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), Rcard_addr, Z_thread);

  if (needs_frame) {
    __ pop_frame();
    __ restore_return_pc();
  }

  __ bind(filtered);

  BLOCK_COMMENT("} g1_write_barrier_post");
}

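// Oop store with both barriers: the pre-barrier logs the old value, the
// store itself is delegated to the plain BarrierSetAssembler, and the
// post-barrier dirties the covered card. For array and unknown-reference
// stores the exact element address must be card-marked ("precise"
// marking); otherwise marking the card of the object header suffices.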
void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                         const Address& dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool is_array = (decorators & IS_ARRAY) != 0;
  bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool precise = is_array || on_anonymous;
  // Load and record the previous value.
  g1_write_barrier_pre(masm, decorators, &dst, tmp3, val, tmp1, tmp2, false);

  BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);

  // No need for post barrier if storing NULL
  if (val != noreg) {
    const Register base = dst.base(),
                   idx  = dst.index();
    const intptr_t disp = dst.disp();
    if (precise && (disp != 0 || idx != noreg)) {
      __ add2reg_with_index(base, disp, idx, base);
    }
    g1_write_barrier_post(masm, decorators, base, val, tmp1, tmp2, tmp3);
  }
}

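// Resolves a jobject to a raw oop. A jweak handle is tagged with
// JNIHandles::weak_tag_mask; after untagging and loading, a non-NULL
// result read through a weak handle must be logged via the pre-barrier
// (ON_PHANTOM_OOP_REF) so concurrent marking keeps the object alive.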
void G1BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value, Register tmp1, Register tmp2) {
  NearLabel Ldone, Lnot_weak;
  __ z_ltgr(tmp1, value);
  __ z_bre(Ldone);          // Use NULL result as-is.

  __ z_nill(value, ~JNIHandles::weak_tag_mask);
  __ z_lg(value, 0, value); // Resolve (untagged) jobject.

  __ z_tmll(tmp1, JNIHandles::weak_tag_mask); // Test for jweak tag.
  __ z_braz(Lnot_weak);
  __ verify_oop(value);
  DecoratorSet decorators = IN_NATIVE | ON_PHANTOM_OOP_REF;
  g1_write_barrier_pre(masm, decorators, (const Address*)NULL, value, noreg, tmp1, tmp2, true);
  __ bind(Lnot_weak);
  __ verify_oop(value);
  __ bind(Ldone);
}

#ifdef COMPILER1

#undef __
#define __ ce->masm()->

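// C1 slow-path stub for the SATB pre-barrier. The inline fast path emitted
// by G1BarrierSetC1 has already established that marking is active; this
// stub loads the previous value if requested, filters NULL, and calls the
// shared pre-barrier runtime blob with the pre-value passed in Z_R1_scratch.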
void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.
  __ bind(*stub->entry());
  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
  }

  __ z_ltgr(Z_R1_scratch, pre_val_reg); // Pass oop in Z_R1_scratch to Runtime1::g1_pre_barrier_slow_id.
  __ branch_optimized(Assembler::bcondZero, *stub->continuation());
  ce->emit_call_c(bs->pre_barrier_c1_runtime_code_blob()->code_begin());
  __ branch_optimized(Assembler::bcondAlways, *stub->continuation());
}

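// C1 slow-path stub for the card-table post-barrier. Filters a NULL new
// value, then calls the shared post-barrier runtime blob with the store
// address passed in Z_R1_scratch.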
void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());
  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
  assert(stub->addr()->is_register(), "Precondition.");
  assert(stub->new_val()->is_register(), "Precondition.");
  Register new_val_reg = stub->new_val()->as_register();
  __ z_ltgr(new_val_reg, new_val_reg);
  __ branch_optimized(Assembler::bcondZero, *stub->continuation());
  __ z_lgr(Z_R1_scratch, stub->addr()->as_pointer_register());
  ce->emit_call_c(bs->post_barrier_c1_runtime_code_blob()->code_begin());
  __ branch_optimized(Assembler::bcondAlways, *stub->continuation());
}

#undef __

#define __ sasm->

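// Helpers for the C1 runtime stubs below: save/restore all volatile
// registers around a runtime call so that the interrupted compiled code
// sees its register state unchanged.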
static OopMap* save_volatile_registers(StubAssembler* sasm, Register return_pc = Z_R14) {
  __ block_comment("save_volatile_registers");
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
  int frame_size_in_slots = RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
}

static void restore_volatile_registers(StubAssembler* sasm) {
  __ block_comment("restore_volatile_registers");
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
  RegisterSaver::restore_live_registers(sasm, reg_set);
}

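// Out-of-line runtime code for the C1 pre-barrier: re-checks that marking
// is still active, then enqueues the previous value (passed in
// Z_R1_scratch) into the thread-local SATB buffer. If the buffer is full,
// all volatile registers are saved and
// G1SATBMarkQueueSet::handle_zero_index_for_thread() is called to retire
// the full buffer and install a fresh one, after which the enqueue is
// retried.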
void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  // Z_R1_scratch: previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();
  __ set_info("g1_pre_barrier_slow_id", false);

  Register pre_val = Z_R1_scratch;
  Register tmp  = Z_R6; // Must be non-volatile because it is used to save pre_val.
  Register tmp2 = Z_R7;

  Label refill, restart, marking_not_active;
  int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
  int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
  int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());

  // Save tmp registers (see assertion in G1PreBarrierStub::emit_code()).
  __ z_stg(tmp,  0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_stg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);

  // Is marking still active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ load_and_test_int(tmp, Address(Z_thread, satb_q_active_byte_offset));
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ load_and_test_byte(tmp, Address(Z_thread, satb_q_active_byte_offset));
  }
  __ z_bre(marking_not_active); // Activity indicator is zero, so there is no marking going on currently.

  __ bind(restart);
  // Load the index into the SATB buffer. SATBMarkQueue::_index is a
  // size_t, so a 64-bit load-and-test (z_ltg) is appropriate.
  __ z_ltg(tmp, satb_q_index_byte_offset, Z_R0, Z_thread);

  // index == 0?
  __ z_brz(refill);

  __ z_lg(tmp2, satb_q_buf_byte_offset, Z_thread);
  __ add2reg(tmp, -oopSize);

  __ z_stg(pre_val, 0, tmp, tmp2); // [_buf + index] := pre_val
  __ z_stg(tmp, satb_q_index_byte_offset, Z_thread);

  __ bind(marking_not_active);
  // Restore tmp registers (see assertion in G1PreBarrierStub::emit_code()).
  __ z_lg(tmp,  0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_lg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_br(Z_R14);

  __ bind(refill);
  save_volatile_registers(sasm);
  __ z_lgr(tmp, pre_val); // Save pre_val; tmp is non-volatile.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1SATBMarkQueueSet::handle_zero_index_for_thread),
                  Z_thread);
  __ z_lgr(pre_val, tmp); // Restore pre_val.
  restore_volatile_registers(sasm);
  __ z_bru(restart);
}

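// Out-of-line runtime code for the C1 post-barrier: computes the card
// address for the updated oop slot (passed in Z_R1_scratch), filters young
// and already-dirty cards, dirties the card, and enqueues its address into
// the thread-local dirty card queue, refilling the queue via
// G1DirtyCardQueueSet::handle_zero_index_for_thread() when it is full.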
void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
  // Z_R1_scratch: oop address, address of updated memory slot

  BarrierSet* bs = BarrierSet::barrier_set();
  __ set_info("g1_post_barrier_slow_id", false);

  Register addr_oop  = Z_R1_scratch;
  Register addr_card = Z_R1_scratch;
  Register r1        = Z_R6; // Must be saved/restored.
  Register r2        = Z_R7; // Must be saved/restored.
  Register cardtable = r1;   // Must be non-volatile, because it is used to save addr_card.
  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
  CardTable* ct = ctbs->card_table();
  CardTable::CardValue* byte_map_base = ct->byte_map_base();

  // Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
  __ z_stg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);

  Label not_already_dirty, restart, refill, young_card;

  // Calculate the address of the card corresponding to the updated oop slot.
  AddressLiteral rs(byte_map_base);
  __ z_srlg(addr_card, addr_oop, CardTable::card_shift);
  addr_oop = noreg; // dead now
  __ load_const_optimized(cardtable, rs); // cardtable := <card table base>
  __ z_agr(addr_card, cardtable); // addr_card := addr_oop>>card_shift + cardtable

  __ z_cli(0, addr_card, (int)G1CardTable::g1_young_card_val());
  __ z_bre(young_card);

  __ z_sync(); // Required to support concurrent cleaning.

  __ z_cli(0, addr_card, (int)CardTable::dirty_card_val());
  __ z_brne(not_already_dirty);

  __ bind(young_card);
  // The card is young or already dirty: restore
  // the used registers and return.
  __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_br(Z_R14);

  // Not dirty.
  __ bind(not_already_dirty);

  // First, dirty it: [addr_card] := dirty_card_val
  __ z_mvi(0, addr_card, CardTable::dirty_card_val());

  Register idx = cardtable; // Must be non-volatile, because it is used to save addr_card.
  Register buf = r2;
  cardtable = noreg; // now dead

  // Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
  __ z_stg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);

  ByteSize dirty_card_q_index_byte_offset = G1ThreadLocalData::dirty_card_queue_index_offset();
  ByteSize dirty_card_q_buf_byte_offset = G1ThreadLocalData::dirty_card_queue_buffer_offset();

  __ bind(restart);

  // Get the index into the update buffer. G1DirtyCardQueue::_index is
  // a size_t, so z_ltg is appropriate here.
  __ z_ltg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));

  // index == 0?
  __ z_brz(refill);

  __ z_lg(buf, Address(Z_thread, dirty_card_q_buf_byte_offset));
  __ add2reg(idx, -oopSize);

  __ z_stg(addr_card, 0, idx, buf); // [_buf + index] := <address_of_card>
  __ z_stg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));
  // Restore killed registers and return.
  __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_lg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_br(Z_R14);

  __ bind(refill);
  save_volatile_registers(sasm);
  __ z_lgr(idx, addr_card); // Save addr_card; idx is non-volatile.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1DirtyCardQueueSet::handle_zero_index_for_thread),
                                   Z_thread);
  __ z_lgr(addr_card, idx);
  restore_volatile_registers(sasm); // Restore addr_card.
  __ z_bru(restart);
}

#undef __

#endif // COMPILER1