/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/memnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================
bool Parse::static_field_ok_in_clinit(ciField* field, ciMethod* method) {
  // Could be the field_holder's <clinit> method, or <clinit> for a subklass.
  // Better to check now than to deoptimize as soon as we execute.
  assert(field->is_static(), "Only check if field is static");
  // is_being_initialized() is too generous.  It allows access to statics
  // by threads that are not running the <clinit> before the <clinit> finishes.
  // return field->holder()->is_being_initialized();

  // The following restriction is correct but conservative.
  // It would also be desirable to allow compilation of methods called from
  // <clinit>, but the generated code would need to be made safe for execution
  // by other threads, or the transition from interpreted to compiled code
  // would need to be guarded.
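  //
  // Illustrative sketch (hypothetical Java source) of the accesses this
  // check permits:
  //
  //   class C {
  //     static int X;
  //     static { X = 42; }    // inside C.<clinit>: access_OK below
  //     C()    { X += 1; }    // inside C.<init>:   access_OK below
  //   }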
  ciInstanceKlass* field_holder = field->holder();

  bool access_OK = false;
  if (method->holder()->is_subclass_of(field_holder)) {
    if (method->is_static()) {
      if (method->name() == ciSymbol::class_initializer_name()) {
        // OK to access static fields inside initializer
        access_OK = true;
      }
    } else {
      if (method->name() == ciSymbol::object_initializer_name()) {
        // It's also OK to access static fields inside a constructor,
        // because any thread calling the constructor must first have
        // synchronized on the class by executing a '_new' bytecode.
        access_OK = true;
      }
    }
  }

  return access_OK;
}


void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "getfield: typeflow responsibility");

  ciInstanceKlass* field_holder = field->holder();

  if (is_field == field->is_static()) {
    // Interpreter will throw java_lang_IncompatibleClassChangeError
    // Check this before allowing <clinit> methods to access static fields
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  if (!is_field && !field_holder->is_initialized()) {
    if (!static_field_ok_in_clinit(field, method())) {
      uncommon_trap(Deoptimization::Reason_uninitialized,
                    Deoptimization::Action_reinterpret,
                    NULL, "!static_field_ok_in_clinit");
      return;
    }
  }

  // Deoptimize on putfield writes to call site target field.
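  // (Assumed rationale: C2 constant-folds a CallSite's target when inlining
  // invokedynamic call sites, so a compiled write to the target field would
  // invalidate that assumption; the interpreter path performs the required
  // dependency invalidation instead.)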
  if (!is_get && field->is_call_site_target()) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_reinterpret,
                  NULL, "put to call site target field");
    return;
  }

  assert(field->will_link(method(), bc()), "getfield: typeflow responsibility");

  // Note:  We do not check for an unloaded field type here any more.

  // Generate code for the object pointer.
  Node* obj;
  if (is_field) {
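    // For a put, the value to be stored (one or two stack slots, per the
    // field type's size) sits above the receiver, so peek past it.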
    int obj_depth = is_get ? 0 : field->type()->size();
    obj = null_check(peek(obj_depth));
    // Was the null exception detected at compile time?
    if (stopped())  return;

#ifdef ASSERT
    const TypeInstPtr* tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
    assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");
#endif

    if (is_get) {
      (void) pop();  // pop receiver before getting
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
      (void) pop();  // pop receiver after putting
    }
  } else {
    const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
    obj = _gvn.makecon(tip);
    if (is_get) {
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
    }
  }
}


void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
  BasicType bt = field->layout_type();

  // Does this field have a constant value?  If so, just push the value.
  if (field->is_constant() &&
      // Keep consistent with types found by ciTypeFlow: for an
      // unloaded field type, ciTypeFlow::StateVector::do_getstatic()
      // speculates the field is null.  The code in the rest of this
      // method does the same.  We must not bypass it and use a
      // non-null constant here.
      (bt != T_OBJECT || field->type()->is_loaded())) {
    // final or stable field
    Node* con = make_constant_from_field(field, obj);
    if (con != NULL) {
      push_node(field->layout_type(), con);
      return;
    }
  }

  ciType* field_klass = field->type();
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(obj, obj, offset);

  // Build the resultant type of the load
  const Type* type;

  bool must_assert_null = false;

  if (bt == T_OBJECT) {
    if (!field->type()->is_loaded()) {
      type = TypeInstPtr::BOTTOM;
      must_assert_null = true;
    } else if (field->is_static_constant()) {
      // This can happen if the constant oop is non-perm.
      ciObject* con = field->constant_value().as_object();
      // Do not "join" in the previous type; it doesn't add value,
      // and may yield a vacuous result if the field is of interface type.
      if (con->is_null_object()) {
        type = TypePtr::NULL_PTR;
      } else {
        type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
      }
      assert(type != NULL, "field singleton type must be consistent");
    } else {
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    }
  } else {
    type = Type::get_const_basic_type(bt);
  }
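  // On CPUs that are not multiple-copy-atomic (e.g. PPC64), a volatile load
  // must also be preceded by a StoreLoad barrier so that volatile writes made
  // by other threads appear totally ordered (the IRIW litmus test).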
  if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
    insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
  }
  // Build the load.
  //
  MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
  bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
  Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, needs_atomic_access);

  // Adjust Java stack
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here.  It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever.  Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations.  (Or, we might load a class
    // which should not be loaded.)  If we ever see a non-null
    // value, we will then trap and recompile.  (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field->type()));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    null_assert(peek());
    set_bci(iter().cur_bci()); // put it back
  }

  // If reference is volatile, prevent following memory ops from
  // floating up past the volatile read.  Also prevents commoning
  // another volatile read.
  if (field->is_volatile()) {
    // Memory barrier includes bogus read of value to force load BEFORE membar
    insert_mem_bar(Op_MemBarAcquire, ld);
  }
}

void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
  bool is_vol = field->is_volatile();
  // If the reference is volatile, prevent preceding memory ops from
  // floating down past the volatile write.  Also prevents commoning
  // another volatile read.
  if (is_vol)  insert_mem_bar(Op_MemBarRelease);

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();
  // Value to be stored
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();
  // Round doubles before storing
  if (bt == T_DOUBLE)  val = dstore_rounding(val);
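  // (dstore_rounding is presumably an identity on most platforms; it matters
  // where the FPU keeps extra precision, e.g. x87 without SSE2.)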

  // Conservatively release stores of object references.
  const MemNode::MemOrd mo =
    is_vol ?
    // Volatile fields need releasing stores.
    MemNode::release :
    // Non-volatile fields also need releasing stores if they hold an
    // object reference, because the object reference might point to
    // a freshly created object.
    StoreNode::release_if_reference(bt);
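  // (release_if_reference(bt) yields MemNode::release for reference types
  // and MemNode::unordered for primitives.)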

  // Store the value.
  Node* store;
  if (bt == T_OBJECT) {
    const TypeOopPtr* field_type;
    if (!field->type()->is_loaded()) {
      field_type = TypeInstPtr::BOTTOM;
    } else {
      field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
    }
    store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
  } else {
    bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
    store = store_to_memory(control(), adr, val, bt, adr_type, mo, needs_atomic_access);
  }

  // If the reference is volatile, prevent following volatile ops from
  // floating up before the volatile write.
  if (is_vol) {
    // On CPUs that are not multiple-copy-atomic, the MemBarVolatile is
    // issued before the volatile load instead (see do_get_xxx), so it is
    // only needed here on multiple-copy-atomic CPUs.
    if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
      insert_mem_bar(Op_MemBarVolatile); // Use fat membar
    }
    // Remember we wrote a volatile field.
    // For CPUs that are not multiple-copy-atomic (e.g. PPC64), a barrier
    // must be issued in constructors that contain such stores.  See
    // do_exits() in parse1.cpp.
    if (is_field) {
      set_wrote_volatile(true);
    }
  }

  if (is_field) {
    set_wrote_fields(true);
  }

  // If the field is final, the rules of Java say we are in <init> or <clinit>.
  // Note the presence of writes to final non-static fields, so that we
  // can insert a memory barrier later on to keep the writes from floating
  // out of the constructor.
  // Any method can write a @Stable field; insert memory barriers after those also.
  if (is_field && (field->is_final() || field->is_stable())) {
    if (field->is_final()) {
      set_wrote_final(true);
    }
    if (field->is_stable()) {
      set_wrote_stable(true);
    }

    // Preserve the allocation ptr to create a precedence edge to it in the
    // membar generated on exit from the constructor.
    // Can't bind a stable field to its allocation; only record the
    // allocation for a final field.
    if (field->is_final() && AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) {
      set_alloc_with_final(obj);
    }
  }
}

//=============================================================================
void Parse::do_anewarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Uncommon trap if the element class is not loaded: we need the loaded
  // class for the rest of the graph, but we must not initialize the
  // container class (see the Java spec)!
  assert(will_link, "anewarray: typeflow responsibility");

  ciObjArrayKlass* array_klass = ciObjArrayKlass::make(klass);
  // Check that array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate uncommon_trap for unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  }

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}


void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();

  Node*   count_val = pop();
  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
  Node*   obj = new_array(makecon(array_klass), count_val, 1);
  // Push resultant oop onto stack
  push(obj);
}

// Expand simple expressions like new int[3][5] and new Object[2][nonConLen].
// Also handle the degenerate 1-dimensional case of anewarray.
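// For example, new int[3][5] becomes one allocation of the outer array of
// length 3 plus three inner int[5] allocations, each stored into its slot
// in the outer array.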
Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node** lengths, int ndimensions, int nargs) {
  Node* length = lengths[0];
  assert(length != NULL, "");
  Node* array = new_array(makecon(TypeKlassPtr::make(array_klass)), length, nargs);
  if (ndimensions > 1) {
    jint length_con = find_int_con(length, -1);
    guarantee(length_con >= 0, "non-constant multianewarray");
    ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
    const TypePtr* adr_type = TypeAryPtr::OOPS;
    const TypeOopPtr*    elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
    const intptr_t header   = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
    for (jint i = 0; i < length_con; i++) {
      Node*    elem   = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
      intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
      Node*    eaddr  = basic_plus_adr(array, offset);
      store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT, MemNode::unordered);
    }
  }
  return array;
}

void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note:  Array classes are always initialized; no is_initialized check.

  kill_dead_locals();

  // get the lengths from the stack (first dimension is on top)
  Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
  length[ndimensions] = NULL;  // terminating null for make_runtime_call
  int j;
  for (j = ndimensions-1; j >= 0; j--)  length[j] = pop();

  // The original expression was of this form: new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
  const int expand_limit = MIN2((int)MultiArrayExpandLimit, 100);
  int expand_count = 1;        // count of allocations in the expansion
  int expand_fanout = 1;       // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    int dim_con = find_int_con(length[j], -1);
    expand_fanout *= dim_con;
    expand_count  += expand_fanout; // count the level-J sub-arrays
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }
  }
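  // Worked example: for new T[2][3][n], the loop sees dim_con = 2, then 3,
  // so expand_fanout becomes 2, then 6, and expand_count = 1 + 2 + 6 = 9
  // allocations in all (counting the innermost T[n] arrays).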

  // Can use multianewarray instead of [a]newarray if only one dimension,
  // or if all non-final dimensions are small constants.
  if (ndimensions == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
    Node* obj = NULL;
    // Set the original stack and the reexecute bit for the interpreter
    // to reexecute the multianewarray bytecode if deoptimization happens.
    // Do it unconditionally even for a one-dimensional multianewarray.
    // Note: the reexecute bit will be set in GraphKit::add_safepoint_edges()
    // when the AllocateArray node for the newarray is created.
    { PreserveReexecuteState preexecs(this);
      inc_sp(ndimensions);
      // Pass 0 as nargs since uncommon trap code does not need to restore stack.
      obj = expand_multianewarray(array_klass, &length[0], ndimensions, 0);
    } // original reexecute and sp are set back here
    push(obj);
    return;
  }

  address fun = NULL;
  switch (ndimensions) {
  case 1: ShouldNotReachHere(); break;
  case 2: fun = OptoRuntime::multianewarray2_Java(); break;
  case 3: fun = OptoRuntime::multianewarray3_Java(); break;
  case 4: fun = OptoRuntime::multianewarray4_Java(); break;
  case 5: fun = OptoRuntime::multianewarray5_Java(); break;
  }
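  // More than 5 dimensions: there is no fixed-arity stub, so fun stays NULL
  // and we fall through to the generic multianewarrayN call below, passing
  // the dimension sizes in a freshly allocated int array.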
  Node* c = NULL;

  if (fun != NULL) {
    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarray_Type(ndimensions),
                          fun, NULL, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass)),
                          length[0], length[1], length[2],
                          (ndimensions > 2) ? length[3] : NULL,
                          (ndimensions > 3) ? length[4] : NULL);
  } else {
    // Create a java array for dimension sizes
    Node* dims = NULL;
    { PreserveReexecuteState preexecs(this);
      inc_sp(ndimensions);
      Node* dims_array_klass = makecon(TypeKlassPtr::make(ciArrayKlass::make(ciType::make(T_INT))));
      dims = new_array(dims_array_klass, intcon(ndimensions), 0);

      // Fill it in with the dimension values
      for (j = 0; j < ndimensions; j++) {
        Node* dims_elem = array_element_address(dims, intcon(j), T_INT);
        store_to_memory(control(), dims_elem, length[j], T_INT, TypeAryPtr::INTS, MemNode::unordered);
      }
    }

    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarrayN_Type(),
                          OptoRuntime::multianewarrayN_Java(), NULL, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass)),
                          dims);
  }
  make_slow_call_ex(c, env()->Throwable_klass(), false);

  Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms));

  const Type* type = TypeOopPtr::make_from_klass_raw(array_klass);

  // Improve the type:  We know it's not null, exact, and of a given length.
  type = type->is_ptr()->cast_to_ptr_type(TypePtr::NotNull);
  type = type->is_aryptr()->cast_to_exactness(true);

  const TypeInt* ltype = _gvn.find_int_type(length[0]);
  if (ltype != NULL)
    type = type->is_aryptr()->cast_to_size(ltype);

  // We cannot sharpen the nested sub-arrays, since the top level is mutable.

  Node* cast = _gvn.transform(new CheckCastPPNode(control(), res, type));
  push(cast);

  // Possible improvements:
  // - Make a fast path for small multi-arrays.  (W/ implicit init. loops.)
  // - Issue CastII against length[*] values, to TypeInt::POS.
}