/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/oop.inline.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/valueKlass.hpp"
#include "oops/valueArrayKlass.hpp"

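// Return the value factory method for this klass, i.e. the method whose
// ConstMethod carries a value-factory parameter mapping; NULL if none exists.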
Method* ValueKlass::factory_method() const {
  for (int i = 0; i < methods()->length(); i++) {
    ConstMethod* cm = methods()->at(i)->constMethod();
    if (cm->has_valuefactory_parameter_mapping()) {
      return methods()->at(i);
    }
  }
  return NULL;
}

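// Offset of the first value field: the instance base offset aligned up to a
// long boundary.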
int ValueKlass::first_field_offset() {
#ifdef ASSERT
  instanceKlassHandle k(this);
  int first_offset = INT_MAX;
  for (JavaFieldStream fs(k()); !fs.done(); fs.next()) {
    if (fs.offset() < first_offset) first_offset = fs.offset();
  }
#endif
  int base_offset = instanceOopDesc::base_offset_in_bytes();
  // The first field of value types is aligned on a long boundary
  base_offset = align_size_up(base_offset, BytesPerLong);
  assert(base_offset == first_offset, "inconsistent offsets");
  return base_offset;
}

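// A value is considered atomic if its non-static field payload fits within a
// single jlong.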
bool ValueKlass::is_atomic() {
  return (nonstatic_field_size() * heapOopSize) <= longSize;
}

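// Count the embedded oops of a value instance by summing its non-static oop maps.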
int ValueKlass::nonstatic_oop_count() {
  int oops = 0;
  int map_count = nonstatic_oop_map_count();
  OopMapBlock* block = start_of_nonstatic_oop_maps();
  OopMapBlock* end = block + map_count;
  while (block != end) {
    oops += block->count();
    block++;
  }
  return oops;
}

// Arrays of...

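// Should arrays of this value klass be flattened? Controlled by the
// ValueArrayFlatten flag and the per-element size and oop-count limits.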
bool ValueKlass::flatten_array() {
  if (!ValueArrayFlatten) {
    return false;
  }

  int elem_bytes = raw_value_byte_size();
  // Too big
  if ((ValueArrayElemMaxFlatSize >= 0) && (elem_bytes > ValueArrayElemMaxFlatSize)) {
    return false;
  }
  // Too many embedded oops
  if ((ValueArrayElemMaxFlatOops >= 0) && (nonstatic_oop_count() > ValueArrayElemMaxFlatOops)) {
    return false;
  }

  return true;
}


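// Return (or create) the array klass for this value klass: a ValueArrayKlass
// when elements are flattened, otherwise the regular ObjArrayKlass path.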
Klass* ValueKlass::array_klass_impl(instanceKlassHandle this_k, bool or_null, int n, TRAPS) {
  if (!ValueKlass::cast(this_k())->flatten_array()) {
    return InstanceKlass::array_klass_impl(this_k, or_null, n, THREAD);
  }

  // Basically the same as InstanceKlass::array_klass_impl(), but using
  // ValueArrayKlass::allocate_klass()
  if (this_k->array_klasses() == NULL) {
    if (or_null) return NULL;

    ResourceMark rm;
    JavaThread *jt = (JavaThread *)THREAD;
    {
      // Atomic creation of array_klasses
      MutexLocker mc(Compile_lock, THREAD);   // for vtables
      MutexLocker ma(MultiArray_lock, THREAD);

      // Check if update has already taken place
      if (this_k->array_klasses() == NULL) {
        Klass* ak;
        if (ValueKlass::cast(this_k())->is_atomic() || (!ValueArrayAtomicAccess)) {
          ak = ValueArrayKlass::allocate_klass(this_k, CHECK_NULL);
        }
        else {
          ak = ObjArrayKlass::allocate_objArray_klass(this_k->class_loader_data(), 1, this_k, CHECK_NULL);
        }
        this_k->set_array_klasses(ak);
      }
    }
  }
  // array_klasses() will always be set at this point
  ArrayKlass* ak = ArrayKlass::cast(this_k->array_klasses());
  if (or_null) {
    return ak->array_klass_or_null(n);
  }
  return ak->array_klass(n, THREAD);
}

Klass* ValueKlass::array_klass_impl(bool or_null, int n, TRAPS) {
  instanceKlassHandle this_k(THREAD, this);
  return array_klass_impl(this_k, or_null, n, THREAD);
}

Klass* ValueKlass::array_klass_impl(bool or_null, TRAPS) {
  return array_klass_impl(or_null, 1, THREAD);
}

/*
 * Store the value of this klass contained within src into dst.
 *
 * This operation is appropriate for use from vastore, vaload and putfield (for values).
 *
 * GC barriers currently can lock with no safepoint check and allocate c-heap,
 * so raw pointers are "safe" for now.
 *
 * Going forward, look to use a machine-generated (stub gen or bc) version for the
 * most commonly used klass layouts.
 */
void ValueKlass::value_store(void* src, void* dst, bool dst_heap, bool dst_uninitialized) {
  // The raw "memcpy" byte size of the value payload...
  size_t raw_byte_size = raw_value_byte_size();

  if (contains_oops() && dst_heap) {
    // src/dst aren't oops; this offset adjusts the oop map offsets to raw addresses
    int oop_offset = valueOopDescBase();

    // Pre-barriers...
    OopMapBlock* map = start_of_nonstatic_oop_maps();
    OopMapBlock* const end = map + nonstatic_oop_map_count();
    while (map != end) {
      // Shame we can't just use the existing oop iterator...src/dst aren't oops
      uintptr_t doop_address = ((uintptr_t)dst) + (map->offset() - oop_offset);
      if (UseCompressedOops) {
        oopDesc::bs()->write_ref_array_pre((narrowOop*) doop_address, map->count(), dst_uninitialized);
      }
      else {
        oopDesc::bs()->write_ref_array_pre((oop*) doop_address, map->count(), dst_uninitialized);
      }
      map++;
    }

    // Actual store...
    // Atomic oop copy, even if neither dst nor src requires atomicity?
    Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst, raw_byte_size / sizeof(jlong));

    // Post-barriers...
    map = start_of_nonstatic_oop_maps();
    while (map != end) {
      uintptr_t doop_address = ((uintptr_t)dst) + (map->offset() - oop_offset);
      oopDesc::bs()->write_ref_array((HeapWord*) doop_address, map->count());
      map++;
    }
  }
  else {   // Primitive-only case...
    memcpy(dst, src, raw_byte_size); // Actual store
  }
}