/*
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "oops/valueKlass.inline.hpp"
#include "runtime/signature_cc.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER2
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

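// Advance sig_index and from_index past all unpacked (scalarized) fields of the
// value type argument starting at sig_index, without emitting any code.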
void MacroAssembler::skip_unpacked_fields(const GrowableArray<SigEntry>* sig, int& sig_index, VMRegPair* regs_from, int regs_from_count, int& from_index) {
  ScalarizedValueArgsStream stream(sig, sig_index, regs_from, regs_from_count, from_index);
  VMRegPair from_pair;
  BasicType bt;
  while (stream.next(from_pair, bt)) {}
  sig_index = stream.sig_cc_index();
  from_index = stream.regs_cc_index();
}

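// Return true if register 'to' is used as the source of one of the unpacked fields
// of the value type argument starting at sig_index.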
bool MacroAssembler::is_reg_in_unpacked_fields(const GrowableArray<SigEntry>* sig, int sig_index, VMReg to, VMRegPair* regs_from, int regs_from_count, int from_index) {
  ScalarizedValueArgsStream stream(sig, sig_index, regs_from, regs_from_count, from_index);
  VMRegPair from_pair;
  BasicType bt;
  while (stream.next(from_pair, bt)) {
    if (from_pair.first() == to) {
      return true;
    }
  }

  return false;
}

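// Mark the location of the argument at reg_index as writable. Only used for
// reserved entries, which are always passed on the stack.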
void MacroAssembler::mark_reg_writable(const VMRegPair* regs, int num_regs, int reg_index, MacroAssembler::RegState* reg_state) {
  assert(0 <= reg_index && reg_index < num_regs, "sanity");
  VMReg from_reg = regs[reg_index].first();
  if (from_reg->is_valid()) {
    assert(from_reg->is_stack(), "reserved entries must be stack");
    reg_state[from_reg->value()] = MacroAssembler::reg_writable;
  }
}

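// Walk the scalarized signature and mark the stack slots of all reserved entries
// as writable.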
void MacroAssembler::mark_reserved_entries_writable(const GrowableArray<SigEntry>* sig_cc, const VMRegPair* regs, int num_regs, MacroAssembler::RegState* reg_state) {
  int reg_index = 0;
  for (int sig_index = 0; sig_index < sig_cc->length(); sig_index++) {
    if (SigEntry::is_reserved_entry(sig_cc, sig_index)) {
      mark_reg_writable(regs, num_regs, reg_index, reg_state);
      reg_index++;
    } else if (SigEntry::skip_value_delimiters(sig_cc, sig_index)) {
      reg_index++;
    } else {
      int vt = 1;
      do {
        sig_index++;
        BasicType bt = sig_cc->at(sig_index)._bt;
        if (bt == T_VALUETYPE) {
          vt++;
        } else if (bt == T_VOID &&
                   sig_cc->at(sig_index-1)._bt != T_LONG &&
                   sig_cc->at(sig_index-1)._bt != T_DOUBLE) {
          vt--;
        } else if (SigEntry::is_reserved_entry(sig_cc, sig_index)) {
          mark_reg_writable(regs, num_regs, reg_index, reg_state);
          reg_index++;
        } else {
          reg_index++;
        }
      } while (vt != 0);
    }
  }
}

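// Set up the register/stack-slot state used when shuffling arguments: all locations
// start out writable, source locations are marked read-only (stack slots adjusted by
// sp_inc), and, when packing, the reserved entries are made writable again.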
MacroAssembler::RegState* MacroAssembler::init_reg_state(bool is_packing, const GrowableArray<SigEntry>* sig_cc,
                                                         VMRegPair* regs, int num_regs, int sp_inc, int max_stack) {
  int max_reg = VMRegImpl::stack2reg(max_stack)->value();
  MacroAssembler::RegState* reg_state = NEW_RESOURCE_ARRAY(MacroAssembler::RegState, max_reg);

  // Make all writable
  for (int i = 0; i < max_reg; ++i) {
    reg_state[i] = MacroAssembler::reg_writable;
  }
  // Set all source registers/stack slots to readonly to prevent accidental overwriting
  for (int i = 0; i < num_regs; ++i) {
    VMReg reg = regs[i].first();
    if (!reg->is_valid()) continue;
    if (reg->is_stack()) {
      // Update source stack location by adding stack increment
      reg = VMRegImpl::stack2reg(reg->reg2stack() + sp_inc/VMRegImpl::stack_slot_size);
      regs[i] = reg;
    }
    assert(reg->value() >= 0 && reg->value() < max_reg, "reg value out of bounds");
    reg_state[reg->value()] = MacroAssembler::reg_readonly;
  }
  if (is_packing) {
    // The reserved entries are not used by the packed args, so make them writable
    mark_reserved_entries_writable(sig_cc, regs, num_regs, reg_state);
  }

  return reg_state;
}

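// Compute the unscalarized and scalarized calling conventions of the compiled method
// and emit code that unpacks the value type arguments into their field registers/slots.
// Returns the stack pointer increment determined by the platform-specific shuffle_value_args.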
int MacroAssembler::unpack_value_args_common(Compile* C, bool receiver_only) {
  assert(C->has_scalarized_args(), "value type argument scalarization is disabled");
  Method* method = C->method()->get_Method();
  const GrowableArray<SigEntry>* sig_cc = method->adapter()->get_sig_cc();
  assert(sig_cc != NULL, "must have scalarized signature");

  // Get unscalarized calling convention
  BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sig_cc->length()); // FIXME - may underflow if we support values with no fields!
  int args_passed = 0;
  if (!method->is_static()) {
    sig_bt[args_passed++] = T_OBJECT;
  }
  if (!receiver_only) {
    for (SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) {
      BasicType bt = ss.type();
      sig_bt[args_passed++] = bt;
      if (type2size[bt] == 2) {
        sig_bt[args_passed++] = T_VOID;
      }
    }
  } else {
    // Only unpack the receiver, all other arguments are already scalarized
    InstanceKlass* holder = method->method_holder();
    int rec_len = holder->is_value() ? ValueKlass::cast(holder)->extended_sig()->length() : 1;
    // Copy scalarized signature but skip receiver, value type delimiters and reserved entries
    for (int i = 0; i < sig_cc->length(); i++) {
      if (!SigEntry::is_reserved_entry(sig_cc, i)) {
        if (SigEntry::skip_value_delimiters(sig_cc, i) && rec_len <= 0) {
          sig_bt[args_passed++] = sig_cc->at(i)._bt;
        }
        rec_len--;
      }
    }
  }
  VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, args_passed);
  int args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, args_passed, false);

  // Get scalarized calling convention
  int args_passed_cc = SigEntry::fill_sig_bt(sig_cc, sig_bt);
  VMRegPair* regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, sig_cc->length());
  int args_on_stack_cc = SharedRuntime::java_calling_convention(sig_bt, regs_cc, args_passed_cc, false);

  int extra_stack_offset = wordSize; // stack has the return address
  int sp_inc = shuffle_value_args(false, receiver_only, extra_stack_offset, sig_bt, sig_cc,
                                  args_passed, args_on_stack, regs,
                                  args_passed_cc, args_on_stack_cc, regs_cc);
  return sp_inc;
}

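// Shuffle value type arguments between the "from" and "to" calling conventions by
// repeatedly iterating over the signature. Later rounds start spilling read-only
// sources to break circular register dependencies.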
int MacroAssembler::shuffle_value_args_common(bool is_packing, bool receiver_only, int extra_stack_offset,
                                              BasicType* sig_bt, const GrowableArray<SigEntry>* sig_cc,
                                              int args_passed, int args_on_stack, VMRegPair* regs,            // from
                                              int args_passed_to, int args_on_stack_to, VMRegPair* regs_to,   // to
                                              int sp_inc, int ret_off) {
  int max_stack = MAX2(args_on_stack + sp_inc/VMRegImpl::stack_slot_size, args_on_stack_to);
  RegState* reg_state = init_reg_state(is_packing, sig_cc, regs, args_passed, sp_inc, max_stack);

  // Emit code for packing/unpacking value type arguments
  // We try multiple times and eventually start spilling to resolve (circular) dependencies
  bool done = false;
  for (int i = 0; i < 2*args_passed_to && !done; ++i) {
    done = true;
    bool spill = (i > args_passed_to); // Start spilling?
    // Iterate over all arguments (when unpacking, do in reverse)
    int step = is_packing ? 1 : -1;
    int from_index    = is_packing ? 0 : args_passed      - 1;
    int to_index      = is_packing ? 0 : args_passed_to   - 1;
    int sig_index     = is_packing ? 0 : sig_cc->length() - 1;
    int sig_index_end = is_packing ? sig_cc->length() : -1;
    int vtarg_index = 0;
    for (; sig_index != sig_index_end; sig_index += step) {
      assert(0 <= sig_index && sig_index < sig_cc->length(), "index out of bounds");
      if (SigEntry::is_reserved_entry(sig_cc, sig_index)) {
        if (is_packing) {
          from_index += step;
        } else {
          to_index += step;
        }
      } else {
        assert(0 <= from_index && from_index < args_passed, "index out of bounds");
        assert(0 <= to_index && to_index < args_passed_to, "index out of bounds");
        if (spill) {
          // This call returns true IFF we should keep trying to spill in this round.
          spill = shuffle_value_args_spill(is_packing, sig_cc, sig_index, regs, from_index, args_passed,
                                           reg_state, ret_off, extra_stack_offset);
        }
        BasicType bt = sig_cc->at(sig_index)._bt;
        if (SigEntry::skip_value_delimiters(sig_cc, sig_index)) {
          VMReg from_reg = regs[from_index].first();
          done &= move_helper(from_reg, regs_to[to_index].first(), bt, reg_state, ret_off, extra_stack_offset);
          to_index += step;
        } else if (is_packing || !receiver_only || (from_index == 0 && bt == T_VOID)) {
          if (is_packing) {
            VMReg reg_to = regs_to[to_index].first();
            done &= pack_value_helper(sig_cc, sig_index, vtarg_index, reg_to, regs, args_passed, from_index,
                                      reg_state, ret_off, extra_stack_offset);
            vtarg_index++;
            to_index++;
            continue; // from_index already adjusted
          } else {
            VMReg from_reg = regs[from_index].first();
            done &= unpack_value_helper(sig_cc, sig_index, from_reg, regs_to, to_index, reg_state, ret_off, extra_stack_offset);
          }
        } else {
          continue;
        }
        from_index += step;
      }
    }
  }
  guarantee(done, "Could not resolve circular dependency when shuffling value type arguments");
  return sp_inc;
}

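// Spill a read-only source of the argument at sig_cc_index so that its destination
// can be written and circular dependencies can be resolved. Returns true if no spill
// is needed for this argument (keep trying to spill in this round), false after a
// spill has been emitted or a previous spill is still pending.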
bool MacroAssembler::shuffle_value_args_spill(bool is_packing, const GrowableArray<SigEntry>* sig_cc, int sig_cc_index,
                                              VMRegPair* regs_from, int from_index, int regs_from_count,
                                              RegState* reg_state, int ret_off, int extra_stack_offset) {
  VMReg reg;

  if (!is_packing || SigEntry::skip_value_delimiters(sig_cc, sig_cc_index)) {
    reg = regs_from[from_index].first();
    if (!reg->is_valid() || reg_state[reg->value()] != reg_readonly) {
      // Spilling this won't break circles
      return true;
    }
  } else {
    ScalarizedValueArgsStream stream(sig_cc, sig_cc_index, regs_from, regs_from_count, from_index);
    VMRegPair from_pair;
    BasicType bt;
    bool found = false;
    while (stream.next(from_pair, bt)) {
      reg = from_pair.first();
      assert(reg->is_valid(), "must be");
      if (reg_state[reg->value()] == reg_readonly) {
        found = true;
        break;
      }
    }
    if (!found) {
      // Spilling fields in this value arg won't break circles
      return true;
    }
  }

  // Spill argument to be able to write the source and resolve circular dependencies
  VMReg spill_reg = spill_reg_for(reg);
  if (reg_state[spill_reg->value()] == reg_readonly) {
    // We have already spilled (in a previous round). The spilled register should be consumed by this round.
  } else {
    bool res = move_helper(reg, spill_reg, T_DOUBLE, reg_state, ret_off, extra_stack_offset);
    assert(res, "Spilling should not fail");
    // Set spill_reg as new source and update state
    reg = spill_reg;
    regs_from[from_index].set1(reg);
    reg_state[reg->value()] = reg_readonly;
  }

  return false; // Do not spill again in this round
}