/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/oop.inline.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/valueKlass.hpp"
#include "oops/valueArrayKlass.hpp"

// Returns the method carrying the value-factory parameter mapping for this
// klass, or NULL if there is none.
Method* ValueKlass::factory_method() const {
  for (int i = 0; i < methods()->length(); i++) {
    ConstMethod* cm = methods()->at(i)->constMethod();
    if (cm->has_valuefactory_parameter_mapping()) {
      return methods()->at(i);
    }
  }
  return NULL;
}

int ValueKlass::first_field_offset() {
#ifdef ASSERT
  instanceKlassHandle k(this);
  int first_offset = INT_MAX;
  for (JavaFieldStream fs(k()); !fs.done(); fs.next()) {
    if (fs.offset() < first_offset) first_offset = fs.offset();
  }
#endif
  int base_offset = instanceOopDesc::base_offset_in_bytes();
  // The first field of value types is aligned on a long boundary
  base_offset = align_size_up(base_offset, BytesPerLong);
  assert(base_offset == first_offset, "inconsistent offsets");
  return base_offset;
}

// A value is "atomic" if its entire field payload fits in a jlong, so a
// single 64-bit access can read or write it without tearing.
bool ValueKlass::is_atomic() {
  return (nonstatic_field_size() * heapOopSize) <= longSize;
}

// Total number of oop fields, summed over all nonstatic oop map blocks.
int ValueKlass::nonstatic_oop_count() {
  int oops = 0;
  int map_count = nonstatic_oop_map_count();
  OopMapBlock* block = start_of_nonstatic_oop_maps();
  OopMapBlock* end = block + map_count;
  while (block != end) {
    oops += block->count();
    block++;
  }
  return oops;
}

// Arrays of...
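
// A worked example of the flattening decision below, with hypothetical sizes
// for illustration only: a value type with two jlong fields and one oop field
// might report raw_value_byte_size() == 24 and nonstatic_oop_count() == 1.
// Its arrays are then flattened only when ValueArrayFlatten is on,
// 24 <= ValueArrayElemMaxFlatSize, and 1 <= ValueArrayElemMaxFlatOops
// (a negative value in either Max flag disables that particular limit).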
bool ValueKlass::flatten_array() {
  if (!ValueArrayFlatten) {
    return false;
  }

  int elem_bytes = raw_value_byte_size();
  // Too big
  if ((ValueArrayElemMaxFlatSize >= 0) && (elem_bytes > ValueArrayElemMaxFlatSize)) {
    return false;
  }
  // Too many embedded oops
  if ((ValueArrayElemMaxFlatOops >= 0) && (nonstatic_oop_count() > ValueArrayElemMaxFlatOops)) {
    return false;
  }

  return true;
}

Klass* ValueKlass::array_klass_impl(instanceKlassHandle this_k, bool or_null, int n, TRAPS) {
  if (!ValueKlass::cast(this_k())->flatten_array()) {
    return InstanceKlass::array_klass_impl(this_k, or_null, n, THREAD);
  }

  // Basically the same as instanceKlass, but using "ValueArrayKlass::allocate_klass"
  if (this_k->array_klasses() == NULL) {
    if (or_null) return NULL;

    ResourceMark rm;
    JavaThread *jt = (JavaThread *)THREAD;
    {
      // Atomic creation of array_klasses
      MutexLocker mc(Compile_lock, THREAD);   // for vtables
      MutexLocker ma(MultiArray_lock, THREAD);

      // Check if update has already taken place
      if (this_k->array_klasses() == NULL) {
        Klass* ak;
        if (ValueKlass::cast(this_k())->is_atomic() || (!ValueArrayAtomicAccess)) {
          ak = ValueArrayKlass::allocate_klass(this_k, CHECK_NULL);
        } else {
          ak = ObjArrayKlass::allocate_objArray_klass(this_k->class_loader_data(), 1, this_k, CHECK_NULL);
        }
        this_k->set_array_klasses(ak);
      }
    }
  }
  // this_k->array_klasses() will always be set at this point
  ArrayKlass* ak = ArrayKlass::cast(this_k->array_klasses());
  if (or_null) {
    return ak->array_klass_or_null(n);
  }
  return ak->array_klass(n, THREAD);
}

Klass* ValueKlass::array_klass_impl(bool or_null, int n, TRAPS) {
  instanceKlassHandle this_k(THREAD, this);
  return array_klass_impl(this_k, or_null, n, THREAD);
}

Klass* ValueKlass::array_klass_impl(bool or_null, TRAPS) {
  return array_klass_impl(or_null, 1, THREAD);
}

/*
 * Store the value of this klass contained in src into dst.
 *
 * This operation is appropriate for use from vastore, vaload and putfield (for values).
 *
 * GC barriers currently can lock with no safepoint check and allocate c-heap,
 * so raw pointers are "safe" for now.
 *
 * Going forward, look to use a machine generated (stub gen or bc) version for the most used klass layouts.
 */
void ValueKlass::value_store(void* src, void* dst, bool dst_heap, bool dst_uninitialized) {
  // The raw "memcpy" byte size...
  size_t raw_byte_size = raw_value_byte_size();
  if (contains_oops() && dst_heap) {
    // src/dst aren't oops, need an offset to adjust the oop map offsets
    int oop_offset = valueOopDescBase();

    // Pre-barriers...
    OopMapBlock* map = start_of_nonstatic_oop_maps();
    OopMapBlock* const end = map + nonstatic_oop_map_count();
    while (map != end) {
      // Shame we can't just use the existing oop iterator...src/dst aren't oops
      uintptr_t doop_address = ((uintptr_t)dst) + (map->offset() - oop_offset);
      if (UseCompressedOops) {
        oopDesc::bs()->write_ref_array_pre((narrowOop*) doop_address, map->count(), dst_uninitialized);
      } else {
        oopDesc::bs()->write_ref_array_pre((oop*) doop_address, map->count(), dst_uninitialized);
      }
      map++;
    }

    // Actual store...
    // oop atomic copy, even if both dst & src don't require atomic?
    Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst, raw_byte_size / sizeof(jlong));

    // Post-barriers...
    map = start_of_nonstatic_oop_maps();
    while (map != end) {
      uintptr_t doop_address = ((uintptr_t)dst) + (map->offset() - oop_offset);
      oopDesc::bs()->write_ref_array((HeapWord*) doop_address, map->count());
      map++;
    }
  } else {
    // Primitive-only case...
    memcpy(dst, src, raw_byte_size); // Actual store
  }
}
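
// A worked example of the barrier path above, with hypothetical layout
// numbers for illustration only: suppose a value type has fields
// (jlong, Object), its single oop map block is {offset = 24, count = 1}
// (offsets are relative to a heap-allocated boxed value), and
// valueOopDescBase() == 16. A store into a flattened destination then issues
// write_ref_array_pre() at dst + (24 - 16) == dst + 8, copies the whole
// payload with Copy::conjoint_jlongs_atomic(), and replays the oop map with
// write_ref_array() at the same address so the GC is notified of the stored oop.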