
src/hotspot/share/c1/c1_LIRGenerator.cpp

@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.

@@ -32,10 +32,12 @@
 #include "c1/c1_ValueStack.hpp"
 #include "ci/ciArrayKlass.hpp"
 #include "ci/ciInstance.hpp"
 #include "ci/ciObjArray.hpp"
 #include "ci/ciUtilities.hpp"
+#include "ci/ciValueArrayKlass.hpp"
+#include "ci/ciValueKlass.hpp"
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/c1/barrierSetC1.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"

@@ -639,17 +641,18 @@
     default: ShouldNotReachHere();
   }
 }
 
 
-void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
+void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no,
+                                 CodeEmitInfo* info_for_exception, CodeEmitInfo* info, CodeStub* throw_imse_stub) {
   if (!GenerateSynchronizationCode) return;
   // for slow path, use debug info for state after successful locking
-  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
+  CodeStub* slow_path = new MonitorEnterStub(object, lock, info, throw_imse_stub, scratch);
   __ load_stack_address_monitor(monitor_no, lock);
   // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
-  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
+  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception, throw_imse_stub);
 }
 
 
 void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
   if (!GenerateSynchronizationCode) return;

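The new throw_imse_stub argument gives lock_object() a branch target for objects that must never be synchronized on: value types carry no monitor, so attempting to lock one has to raise IllegalMonitorStateException. A minimal standalone sketch of that dispatch, assuming a simplified object model (Klass, Oop, and lock_object_sketch are illustrative names, not HotSpot API):

#include <cstdio>

struct Klass { bool is_value_type; };   // assumption: stands in for the real klass layout bits
struct Oop   { Klass* klass; };

// Shape of the code lock_object() now emits: test the klass first, and
// jump to the IllegalMonitorStateException stub instead of taking the lock.
void lock_object_sketch(Oop* obj, void (*throw_imse_stub)()) {
  if (obj->klass->is_value_type) {
    throw_imse_stub();                  // corresponds to branching to throw_imse_stub
    return;
  }
  // ... fast-path header CAS, then MonitorEnterStub slow path, as before ...
  std::printf("monitor entered\n");
}
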
@@ -787,10 +790,20 @@
 
   // if a probable array type has been identified, figure out if any
   // of the required checks for a fast case can be elided.
   int flags = LIR_OpArrayCopy::all_flags;
 
+  if (!src->is_loaded_flattened_array() && !dst->is_loaded_flattened_array()) {
+    flags &= ~LIR_OpArrayCopy::always_slow_path;
+  }
+  if (!src->maybe_flattened_array()) {
+    flags &= ~LIR_OpArrayCopy::src_flat_check;
+  }
+  if (!dst->maybe_flattened_array()) {
+    flags &= ~LIR_OpArrayCopy::dst_flat_check;
+  }
+
   if (!src_objarray)
     flags &= ~LIR_OpArrayCopy::src_objarray;
   if (!dst_objarray)
     flags &= ~LIR_OpArrayCopy::dst_objarray;
 

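The hunk above starts from all_flags and clears each flattened-array check that the static types prove unnecessary. A standalone sketch of that masking pattern (the flag names mirror LIR_OpArrayCopy, but this enum and its bit values are assumptions):

#include <cstdint>

enum ArrayCopyFlags : uint32_t {
  always_slow_path = 1u << 0,
  src_flat_check   = 1u << 1,
  dst_flat_check   = 1u << 2,
  all_flags        = always_slow_path | src_flat_check | dst_flat_check
};

uint32_t elide_flat_checks(bool src_known_flat, bool dst_known_flat,
                           bool src_maybe_flat, bool dst_maybe_flat) {
  uint32_t flags = all_flags;
  if (!src_known_flat && !dst_known_flat) flags &= ~always_slow_path; // fast path is possible
  if (!src_maybe_flat)                    flags &= ~src_flat_check;   // source never flattened
  if (!dst_maybe_flat)                    flags &= ~dst_flat_check;   // destination never flattened
  return flags;
}
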
@@ -1528,15 +1541,24 @@
 #endif
 
   if (x->needs_null_check() &&
       (needs_patching ||
        MacroAssembler::needs_explicit_null_check(x->offset()))) {
+    if (needs_patching && x->field()->signature()->starts_with("Q", 1)) {
+      // We are storing a field of type "QT;", but T is not yet loaded, so we don't
+      // know whether this field is flattened or not. Let's deoptimize and recompile.
+      CodeStub* stub = new DeoptimizeStub(new CodeEmitInfo(info),
+                                          Deoptimization::Reason_unloaded,
+                                          Deoptimization::Action_make_not_entrant);
+      __ branch(lir_cond_always, T_ILLEGAL, stub);
+    } else {
     // Emit an explicit null check because the offset is too large.
     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
     __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
   }
+  }
 
   DecoratorSet decorators = IN_HEAP;
   if (is_volatile) {
     decorators |= MO_SEQ_CST;
   }

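The starts_with("Q", 1) test above inspects the first character of the field descriptor: value-type fields are described as "Qpkg/Name;" rather than "Lpkg/Name;". A one-line standalone check for illustration (is_q_descriptor is a hypothetical helper, not HotSpot API):

#include <string>

// assumption: descriptors follow the Q-type convention, e.g. "Qjava/lang/V;"
bool is_q_descriptor(const std::string& field_signature) {
  return !field_signature.empty() && field_signature[0] == 'Q';
}
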
@@ -1546,16 +1568,139 @@
 
   access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
                   value.result(), info != NULL ? new CodeEmitInfo(info) : NULL, info);
 }
 
+// FIXME -- I can't find any other way to pass an address to access_load_at().
+class TempResolvedAddress: public Instruction {
+ public:
+  TempResolvedAddress(ValueType* type, LIR_Opr addr) : Instruction(type) {
+    set_operand(addr);
+  }
+  virtual void input_values_do(ValueVisitor*) {}
+  virtual void visit(InstructionVisitor* v)   {}
+  virtual const char* name() const  { return "TempResolvedAddress"; }
+};
+
+void LIRGenerator::access_flattened_array(bool is_load, LIRItem& array, LIRItem& index, LIRItem& obj_item) {
+  // Find the starting address of the source (inside the array)
+  ciType* array_type = array.value()->declared_type();
+  ciValueArrayKlass* value_array_klass = array_type->as_value_array_klass();
+  assert(value_array_klass->is_loaded(), "must be");
+
+  ciValueKlass* elem_klass = value_array_klass->element_klass()->as_value_klass();
+  int array_header_size = value_array_klass->array_header_in_bytes();
+  int shift = value_array_klass->log2_element_size();
+
+#ifndef _LP64
+  LIR_Opr index_op = new_register(T_INT);
+  // FIXME -- on 32-bit, the shift below can overflow, so we need to check
+  // that the top (shift+1) bits of index_op are zero and otherwise throw
+  // ArrayIndexOutOfBoundsException.
+  if (index.result()->is_constant()) {
+    jint const_index = index.result()->as_jint();
+    __ move(LIR_OprFact::intConst(const_index << shift), index_op);
+  } else {
+    __ shift_left(index_op, shift, index.result());
+  }
+#else
+  LIR_Opr index_op = new_register(T_LONG);
+  if (index.result()->is_constant()) {
+    jint const_index = index.result()->as_jint();
+    __ move(LIR_OprFact::longConst(const_index << shift), index_op);
+  } else {
+    __ convert(Bytecodes::_i2l, index.result(), index_op);
+    // Need to shift manually, as LIR_Address can scale by a shift of at most 3.
+    __ shift_left(index_op, shift, index_op);
+  }
+#endif
+
+  LIR_Opr elm_op = new_pointer_register();
+  LIR_Address* elm_address = new LIR_Address(array.result(), index_op, array_header_size, T_ADDRESS);
+  __ leal(LIR_OprFact::address(elm_address), elm_op);
+
+  for (int i = 0; i < elem_klass->nof_nonstatic_fields(); i++) {
+    ciField* inner_field = elem_klass->nonstatic_field_at(i);
+    assert(!inner_field->is_flattened(), "flattened fields must have been expanded");
+    int obj_offset = inner_field->offset();
+    int elm_offset = obj_offset - elem_klass->first_field_offset(); // the object header is not stored in the array.
+
+    BasicType field_type = inner_field->type()->basic_type();
+    switch (field_type) {
+    case T_BYTE:
+    case T_BOOLEAN:
+    case T_SHORT:
+    case T_CHAR:
+      field_type = T_INT;
+      break;
+    default:
+      break;
+    }
+
+    LIR_Opr temp = new_register(field_type);
+    TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(field_type), elm_op);
+    LIRItem elm_item(elm_resolved_addr, this);
+
+    DecoratorSet decorators = IN_HEAP;
+    if (is_load) {
+      access_load_at(decorators, field_type,
+                     elm_item, LIR_OprFact::intConst(elm_offset), temp,
+                     NULL, NULL);
+      access_store_at(decorators, field_type,
+                      obj_item, LIR_OprFact::intConst(obj_offset), temp,
+                      NULL, NULL);
+    } else {
+      access_load_at(decorators, field_type,
+                     obj_item, LIR_OprFact::intConst(obj_offset), temp,
+                     NULL, NULL);
+      access_store_at(decorators, field_type,
+                      elm_item, LIR_OprFact::intConst(elm_offset), temp,
+                      NULL, NULL);
+    }
+  }
+}
+
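access_flattened_array() above computes the element address as array base + header + (index << log2(element size)), then copies each field at (field offset - first field offset), since flattened elements carry no object header. A standalone sketch of that arithmetic (FlatArrayDesc and its members are assumptions standing in for the ciValueArrayKlass queries):

#include <cstdint>
#include <cstring>

struct FlatArrayDesc {
  uint8_t* base;            // start of the array object in memory
  int      header_bytes;    // array_header_in_bytes()
  int      log2_elem_size;  // log2_element_size()
  int      first_field_off; // first_field_offset() of the element klass
};

uint8_t* element_addr(const FlatArrayDesc& a, int64_t index) {
  // The explicit shift mirrors the patch: LIR_Address can only scale by a shift of at most 3.
  return a.base + a.header_bytes + (index << a.log2_elem_size);
}

void copy_field_out(const FlatArrayDesc& a, int64_t index,
                    int obj_field_offset, void* dst, size_t n) {
  // Rebase the field offset: the element stores fields but no object header.
  int elm_offset = obj_field_offset - a.first_field_off;
  std::memcpy(dst, element_addr(a, index) + elm_offset, n);
}
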
+void LIRGenerator::check_flattened_array(LIRItem& array, CodeStub* slow_path) {
+  LIR_Opr array_klass_reg = new_register(T_METADATA);
+
+  __ move(new LIR_Address(array.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), array_klass_reg);
+  LIR_Opr layout = new_register(T_INT);
+  __ move(new LIR_Address(array_klass_reg, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
+  __ shift_right(layout, Klass::_lh_array_tag_shift, layout);
+  __ cmp(lir_cond_equal, layout, LIR_OprFact::intConst(Klass::_lh_array_tag_vt_value));
+  __ branch(lir_cond_equal, T_ILLEGAL, slow_path);
+}
+
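check_flattened_array() loads Klass::layout_helper and compares its array-tag bits against the value-array tag. A standalone sketch of decoding that tag (the shift and tag values here are assumptions modeled on the Klass::_lh_array_tag_* encoding; the real constants live in klass.hpp):

#include <cstdint>

const int     lh_array_tag_shift    = 29;    // assumption: tag occupies the top bits
const int32_t lh_array_tag_vt_value = ~0x02; // assumption: tag for flattened value arrays

bool is_flattened_array(int32_t layout_helper) {
  // Arithmetic right shift, matching __ shift_right on a signed T_INT register.
  return (layout_helper >> lh_array_tag_shift) == lh_array_tag_vt_value;
}
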
+bool LIRGenerator::needs_flattened_array_store_check(StoreIndexed* x) {
+  if (ValueArrayFlatten && x->elt_type() == T_OBJECT && x->array()->maybe_flattened_array()) {
+    ciType* type = x->value()->declared_type();
+    if (type != NULL && type->is_klass()) {
+      ciKlass* klass = type->as_klass();
+      if (klass->is_loaded() &&
+          !(klass->is_valuetype() && klass->as_value_klass()->flatten_array()) &&
+          !klass->is_java_lang_Object() &&
+          !klass->is_interface()) {
+        // This is known to be a non-flattenable object. If the array is flattened,
+        // it will be caught by the code generated by array_store_check().
+        return false;
+      }
+    }
+    // We're not 100% sure, so let's do the flattened_array_store_check.
+    return true;
+  }
+  return false;
+}
+
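The predicate above returns false only when the stored value's static type proves it can never be an element of a flattened array. A standalone decision sketch under a simplified static-type model (all names here are assumptions, not ci* API):

struct StaticType {
  bool loaded;
  bool is_flattenable_value;   // a value type whose arrays would be flattened
  bool is_object_or_interface; // java.lang.Object or an interface type
};

bool needs_flat_store_check(bool flatten_arrays_enabled, bool array_maybe_flat,
                            const StaticType* value_type) {
  if (!flatten_arrays_enabled || !array_maybe_flat) return false;
  if (value_type != nullptr && value_type->loaded &&
      !value_type->is_flattenable_value && !value_type->is_object_or_interface) {
    return false; // provably non-flattenable: array_store_check() catches a flat array
  }
  return true;    // not 100% sure, so emit the runtime flattened-array check
}
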
 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
   assert(x->is_pinned(),"");
+  assert(x->elt_type() != T_ARRAY, "never used");
+  bool is_loaded_flattened_array = x->array()->is_loaded_flattened_array();
   bool needs_range_check = x->compute_needs_range_check();
   bool use_length = x->length() != NULL;
-  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
-  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
+  bool obj_store = x->elt_type() == T_OBJECT;
+  bool needs_store_check = obj_store && !is_loaded_flattened_array &&
+                                        (x->value()->as_Constant() == NULL ||
                                          !get_jobject_constant(x->value())->is_null_object() ||
                                          x->should_profile());
 
   LIRItem array(x->array(), this);
   LIRItem index(x->index(), this);

@@ -1566,13 +1711,14 @@
   index.load_nonconstant();
 
   if (use_length && needs_range_check) {
     length.set_instruction(x->length());
     length.load_item();
-
   }
-  if (needs_store_check || x->check_boolean()) {
+
+  if (needs_store_check || x->check_boolean() ||
+      is_loaded_flattened_array || needs_flattened_array_store_check(x)) {
     value.load_item();
   } else {
     value.load_for_store(x->elt_type());
   }
 

@@ -1601,17 +1747,40 @@
   if (GenerateArrayStoreCheck && needs_store_check) {
     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
     array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
   }
 
+  if (is_loaded_flattened_array) {
+    if (!x->is_exact_flattened_array_store()) {
+      CodeEmitInfo* info = new CodeEmitInfo(range_check_info);
+      ciKlass* element_klass = x->array()->declared_type()->as_value_array_klass()->element_klass();
+      flattened_array_store_check(value.result(), element_klass, info);
+    } else if (!x->value()->is_never_null()) {
+      __ null_check(value.result(), new CodeEmitInfo(range_check_info));
+    }
+    access_flattened_array(false, array, index, value);
+  } else {
+    StoreFlattenedArrayStub* slow_path = NULL;
+
+    if (needs_flattened_array_store_check(x)) {
+      // Check if we indeed have a flattened array
+      index.load_item();
+      slow_path = new StoreFlattenedArrayStub(array.result(), index.result(), value.result(), state_for(x));
+      check_flattened_array(array, slow_path);
+    }
+
   DecoratorSet decorators = IN_HEAP | IS_ARRAY;
   if (x->check_boolean()) {
     decorators |= C1_MASK_BOOLEAN;
   }
 
   access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
                   NULL, null_check_info);
+    if (slow_path != NULL) {
+      __ branch_destination(slow_path->continuation());
+    }
+  }
 }
 
 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
                                   LIRItem& base, LIR_Opr offset, LIR_Opr result,
                                   CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {

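For a maybe-flattened destination, the hunk above emits the usual C1 slow-path shape: a runtime type check branching to a stub, an inline fast-path store, and a continuation label bound after the store so both paths rejoin. A standalone sketch of that control flow (illustrative names only, not HotSpot code):

#include <functional>

// Try the fast path, fall back to a "stub" (runtime call), and rejoin at a
// single continuation point, as branch_destination(slow_path->continuation()) does.
void store_indexed_sketch(bool array_is_flattened,
                          const std::function<void()>& fast_store,
                          const std::function<void()>& store_flattened_stub) {
  if (array_is_flattened) {
    store_flattened_stub(); // StoreFlattenedArrayStub: element stored via runtime call
  } else {
    fast_store();           // access_store_at(): ordinary inline store
  }
  // continuation: both paths resume here
}
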
@@ -1731,20 +1900,37 @@
   bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
   if (x->needs_null_check() &&
       (needs_patching ||
        MacroAssembler::needs_explicit_null_check(x->offset()) ||
        stress_deopt)) {
+    if (needs_patching && x->field()->signature()->starts_with("Q", 1)) {
+      // We are loading a field of type "QT;", but class T is not yet loaded. We don't know
+      // whether this field is flattened or not. Let's deoptimize and recompile.
+      CodeStub* stub = new DeoptimizeStub(new CodeEmitInfo(info),
+                                          Deoptimization::Reason_unloaded,
+                                          Deoptimization::Action_make_not_entrant);
+      __ branch(lir_cond_always, T_ILLEGAL, stub);
+    } else {
     LIR_Opr obj = object.result();
     if (stress_deopt) {
       obj = new_register(T_OBJECT);
       __ move(LIR_OprFact::oopConst(NULL), obj);
     }
     // Emit an explicit null check because the offset is too large.
     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
     __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
   }
+  } else if (x->value_klass() != NULL && x->default_value() == NULL) {
+    assert(x->is_static() && !x->value_klass()->is_loaded(), "must be");
+    assert(needs_patching, "must be");
+    // The value klass was not loaded, so we don't know what its default value should be.
+    CodeStub* stub = new DeoptimizeStub(new CodeEmitInfo(info),
+                                        Deoptimization::Reason_unloaded,
+                                        Deoptimization::Action_make_not_entrant);
+    __ branch(lir_cond_always, T_ILLEGAL, stub);
+  }
 
   DecoratorSet decorators = IN_HEAP;
   if (is_volatile) {
     decorators |= MO_SEQ_CST;
   }

@@ -1754,10 +1940,22 @@
 
   LIR_Opr result = rlock_result(x, field_type);
   access_load_at(decorators, field_type,
                  object, LIR_OprFact::intConst(x->offset()), result,
                  info ? new CodeEmitInfo(info) : NULL, info);
+
+  if (x->value_klass() != NULL && x->default_value() != NULL) {
+    LabelObj* L_end = new LabelObj();
+    __ cmp(lir_cond_notEqual, result, LIR_OprFact::oopConst(NULL));
+    __ branch(lir_cond_notEqual, T_OBJECT, L_end->label());
+
+    LIRItem default_value(x->default_value(), this);
+    default_value.load_item();
+    __ move(default_value.result(), result);
+
+    __ branch_destination(L_end->label());
+  }
 }
 
 
 //------------------------java.nio.Buffer.checkIndex------------------------
 

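After the load, a null result for a flattenable field is replaced by the field's default value: compare against NULL, branch over the substitution when non-null, and bind L_end where both paths meet. A standalone sketch of the same null-substitution (hypothetical helper, any pointer type):

template <typename T>
const T* load_with_default(const T* loaded, const T* default_value) {
  // cmp result, NULL; branch_notEqual L_end; move default_value -> result; L_end:
  return (loaded != nullptr) ? loaded : default_value;
}
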
@@ -1868,16 +2066,37 @@
       // The range check performs the null check, so clear it out for the load
       null_check_info = NULL;
     }
   }
 
-  DecoratorSet decorators = IN_HEAP | IS_ARRAY;
+  if (x->array()->is_loaded_flattened_array()) {
+    // Find the destination address (of the NewValueTypeInstance)
+    LIR_Opr obj = x->vt()->operand();
+    LIRItem obj_item(x->vt(), this);
 
+    access_flattened_array(true, array, index, obj_item);
+    set_no_result(x);
+  } else {
   LIR_Opr result = rlock_result(x, x->elt_type());
+    LoadFlattenedArrayStub* slow_path = NULL;
+
+    if (x->elt_type() == T_OBJECT && x->array()->maybe_flattened_array()) {
+      index.load_item();
+      // If we are loading from a flattened array, load the element via a runtime call.
+      slow_path = new LoadFlattenedArrayStub(array.result(), index.result(), result, state_for(x));
+      check_flattened_array(array, slow_path);
+    }
+
+    DecoratorSet decorators = IN_HEAP | IS_ARRAY;
   access_load_at(decorators, x->elt_type(),
                  array, index.result(), result,
                  NULL, null_check_info);
+
+    if (slow_path != NULL) {
+      __ branch_destination(slow_path->continuation());
+    }
+  }
 }
 
 
 void LIRGenerator::do_NullCheck(NullCheck* x) {
   if (x->can_trap()) {

@@ -2733,10 +2952,11 @@
     if (loc->is_register()) {
       param->load_item_force(loc);
     } else {
       LIR_Address* addr = loc->as_address_ptr();
       param->load_for_store(addr->type());
+      assert(addr->type() != T_VALUETYPE, "not supported yet");
       if (addr->type() == T_OBJECT) {
         __ move_wide(param->result(), addr);
       } else
         if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
           __ unaligned_move(param->result(), addr);