/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "interpreter/interpreter.hpp" 27 #include "oops/oop.inline.hpp" 28 #include "oops/fieldStreams.hpp" 29 #include "oops/method.hpp" 30 #include "oops/objArrayKlass.hpp" 31 #include "oops/valueKlass.hpp" 32 #include "oops/valueArrayKlass.hpp" 33 #include "utilities/copy.hpp" 34 35 int ValueKlass::first_field_offset() const { 36 #ifdef ASSERT 37 int first_offset = INT_MAX; 38 for (JavaFieldStream fs(this); !fs.done(); fs.next()) { 39 if (fs.offset() < first_offset) first_offset= fs.offset(); 40 } 41 #endif 42 int base_offset = instanceOopDesc::base_offset_in_bytes(); 43 // The first field of value types is aligned on a long boundary 44 base_offset = align_size_up(base_offset, BytesPerLong); 45 assert(base_offset = first_offset, "inconsistent offsets"); 46 return base_offset; 47 } 48 49 int ValueKlass::raw_value_byte_size() const { 50 int heapOopAlignedSize = nonstatic_field_size() << LogBytesPerHeapOop; 51 // If bigger than 64 bits or needs oop alignment, then use jlong aligned 52 // which for values should be jlong aligned, asserts in raw_field_copy otherwise 53 if (heapOopAlignedSize >= longSize || contains_oops()) { 54 return heapOopAlignedSize; 55 } 56 // Small primitives... 57 // If a few small basic type fields, return the actual size, i.e. 58 // 1 byte = 1 59 // 2 byte = 2 60 // 3 byte = 4, because pow2 needed for element stores 61 int first_offset = first_field_offset(); 62 int last_offset = 0; // find the last offset, add basic type size 63 for (JavaFieldStream fs(this); !fs.done(); fs.next()) { 64 if (fs.offset() > last_offset) { 65 int tsz = 0; 66 BasicType type = fs.field_descriptor().field_type(); 67 if (is_java_primitive(type)) { 68 tsz = type2aelembytes(type); 69 } else if (type == T_VALUETYPE) { 70 // Not just primitives. 
Layout aligns embedded value, so use jlong aligned it is 71 return heapOopAlignedSize; 72 } else { 73 guarantee(0, "Unknown type %d", type); 74 } 75 assert(tsz > 0, "Invariant"); 76 last_offset = fs.offset() + tsz; 77 } 78 } 79 assert(last_offset > first_offset, "Invariant"); 80 return 1 << upper_log2(last_offset - first_offset); 81 } 82 83 bool ValueKlass::is_atomic() { 84 return (nonstatic_field_size() * heapOopSize) <= longSize; 85 } 86 87 int ValueKlass::nonstatic_oop_count() { 88 int oops = 0; 89 int map_count = nonstatic_oop_map_count(); 90 OopMapBlock* block = start_of_nonstatic_oop_maps(); 91 OopMapBlock* end = block + map_count; 92 while (block != end) { 93 oops += block->count(); 94 block++; 95 } 96 return oops; 97 } 98 99 // Arrays of... 100 101 bool ValueKlass::flatten_array() { 102 if (!ValueArrayFlatten) { 103 return false; 104 } 105 106 int elem_bytes = raw_value_byte_size(); 107 // Too big 108 if ((ValueArrayElemMaxFlatSize >= 0) && (elem_bytes > ValueArrayElemMaxFlatSize)) { 109 return false; 110 } 111 // Too many embedded oops 112 if ((ValueArrayElemMaxFlatOops >= 0) && (nonstatic_oop_count() > ValueArrayElemMaxFlatOops)) { 113 return false; 114 } 115 116 return true; 117 } 118 119 120 Klass* ValueKlass::array_klass_impl(bool or_null, int n, TRAPS) { 121 if (!flatten_array()) { 122 return InstanceKlass::array_klass_impl(or_null, n, THREAD); 123 } 124 125 // Basically the same as instanceKlass, but using "ValueArrayKlass::allocate_klass" 126 if (array_klasses() == NULL) { 127 if (or_null) return NULL; 128 129 ResourceMark rm; 130 JavaThread *jt = (JavaThread *)THREAD; 131 { 132 // Atomic creation of array_klasses 133 MutexLocker mc(Compile_lock, THREAD); // for vtables 134 MutexLocker ma(MultiArray_lock, THREAD); 135 136 // Check if update has already taken place 137 if (array_klasses() == NULL) { 138 Klass* ak; 139 if (is_atomic() || (!ValueArrayAtomicAccess)) { 140 ak = ValueArrayKlass::allocate_klass(this, CHECK_NULL); 141 } else { 142 ak = 
ObjArrayKlass::allocate_objArray_klass(class_loader_data(), 1, this, CHECK_NULL); 143 } 144 set_array_klasses(ak); 145 } 146 } 147 } 148 // _this will always be set at this point 149 ArrayKlass* ak = ArrayKlass::cast(array_klasses()); 150 if (or_null) { 151 return ak->array_klass_or_null(n); 152 } 153 return ak->array_klass(n, THREAD); 154 } 155 156 Klass* ValueKlass::array_klass_impl(bool or_null, TRAPS) { 157 return array_klass_impl(or_null, 1, THREAD); 158 } 159 160 void ValueKlass::raw_field_copy(void* src, void* dst, size_t raw_byte_size) { 161 /* 162 * Try not to shear fields even if not an atomic store... 163 * 164 * First 3 cases handle value array store, otherwise works on the same basis 165 * as JVM_Clone, at this size data is aligned. The order of primitive types 166 * is largest to smallest, and it not possible for fields to stradle long 167 * copy boundaries. 168 * 169 * If MT without exclusive access, possible to observe partial value store, 170 * but not partial primitive and reference field values 171 */ 172 switch (raw_byte_size) { 173 case 1: 174 *((jbyte*) dst) = *(jbyte*)src; 175 break; 176 case 2: 177 *((jshort*) dst) = *(jshort*)src; 178 break; 179 case 4: 180 *((jint*) dst) = *(jint*) src; 181 break; 182 default: 183 assert(raw_byte_size % sizeof(jlong) == 0, "Unaligned raw_byte_size"); 184 Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst, raw_byte_size >> LogBytesPerLong); 185 } 186 } 187 188 /* 189 * Store the value of this klass contained with src into dst. 190 * 191 * This operation is appropriate for use from vastore, vaload and putfield (for values) 192 * 193 * GC barriers currently can lock with no safepoint check and allocate c-heap, 194 * so raw point is "safe" for now. 
195 * 196 * Going forward, look to use machine generated (stub gen or bc) version for most used klass layouts 197 * 198 */ 199 void ValueKlass::value_store(void* src, void* dst, size_t raw_byte_size, bool dst_heap, bool dst_uninitialized) { 200 if (contains_oops() && dst_heap) { 201 // src/dst aren't oops, need offset to adjust oop map offset 202 const address dst_oop_addr = ((address) dst) - first_field_offset(); 203 204 // Pre-barriers... 205 OopMapBlock* map = start_of_nonstatic_oop_maps(); 206 OopMapBlock* const end = map + nonstatic_oop_map_count(); 207 while (map != end) { 208 // Shame we can't just use the existing oop iterator...src/dst aren't oop 209 address doop_address = dst_oop_addr + map->offset(); 210 if (UseCompressedOops) { 211 oopDesc::bs()->write_ref_array_pre((narrowOop*) doop_address, map->count(), dst_uninitialized); 212 } else { 213 oopDesc::bs()->write_ref_array_pre((oop*) doop_address, map->count(), dst_uninitialized); 214 } 215 map++; 216 } 217 218 raw_field_copy(src, dst, raw_byte_size); 219 220 // Post-barriers... 221 map = start_of_nonstatic_oop_maps(); 222 while (map != end) { 223 address doop_address = dst_oop_addr + map->offset(); 224 oopDesc::bs()->write_ref_array((HeapWord*) doop_address, map->count()); 225 map++; 226 } 227 } else { // Primitive-only case... 228 raw_field_copy(src, dst, raw_byte_size); 229 } 230 } 231 232 oop ValueKlass::derive_value_type_copy(Handle src, InstanceKlass* target_klass, TRAPS) { 233 assert(InstanceKlass::cast(src->klass())->derive_value_type_klass() == target_klass, "Not this DVT"); 234 235 // Allocate new for safety, simply reinstalling the klass pointer is a little too risky 236 target_klass->initialize(CHECK_0); 237 instanceOop value = target_klass->allocate_instance(CHECK_0); 238 value_store(data_for_oop(src()), data_for_oop(value), true, true); 239 return value; 240 } 241 242 // Value type arguments are not passed by reference, instead each 243 // field of the value type is passed as an argument. 
This helper 244 // function collects the fields of the value types (including embedded 245 // value type's fields) in a list. Included with the field's type is 246 // the offset of each field in the value type: i2c and c2i adapters 247 // need that to load or store fields. Finally, the list of fields is 248 // sorted in order of increasing offsets: the adapters and the 249 // compiled code need and agreed upon order of fields. 250 // 251 // The list of basic types that is returned starts with a T_VALUETYPE 252 // and ends with an extra T_VOID. T_VALUETYPE/T_VOID are used as 253 // delimiters. Every entry between the two is a field of the value 254 // type. If there's an embedded value type in the list, it also starts 255 // with a T_VALUETYPE and ends with a T_VOID. This is so we can 256 // generate a unique fingerprint for the method's adapters and we can 257 // generate the list of basic types from the interpreter point of view 258 // (value types passed as reference: iterate on the list until a 259 // T_VALUETYPE, drop everything until and including the closing 260 // T_VOID) or the compiler point of view (each field of the value 261 // types is an argument: drop all T_VALUETYPE/T_VOID from the list). 262 GrowableArray<SigEntry> ValueKlass::collect_fields(int base_off) const { 263 GrowableArray<SigEntry> sig_extended; 264 sig_extended.push(SigEntry(T_VALUETYPE, base_off)); 265 for (JavaFieldStream fs(this); !fs.done(); fs.next()) { 266 if (fs.access_flags().is_static()) continue; 267 fieldDescriptor& fd = fs.field_descriptor(); 268 BasicType bt = fd.field_type(); 269 int offset = base_off + fd.offset() - (base_off > 0 ? 
first_field_offset() : 0); 270 if (bt == T_VALUETYPE) { 271 Symbol* signature = fd.signature(); 272 JavaThread* THREAD = JavaThread::current(); 273 oop loader = class_loader(); 274 oop domain = protection_domain(); 275 ResetNoHandleMark rnhm; 276 HandleMark hm; 277 NoSafepointVerifier nsv; 278 Klass* klass = SystemDictionary::resolve_or_null(signature, 279 Handle(THREAD, loader), Handle(THREAD, domain), 280 THREAD); 281 assert(klass != NULL && !HAS_PENDING_EXCEPTION, "lookup shouldn't fail"); 282 const GrowableArray<SigEntry>& embedded = ValueKlass::cast(klass)->collect_fields(offset); 283 sig_extended.appendAll(&embedded); 284 } else { 285 sig_extended.push(SigEntry(bt, offset)); 286 if (bt == T_LONG || bt == T_DOUBLE) { 287 sig_extended.push(SigEntry(T_VOID, offset)); 288 } 289 } 290 } 291 int offset = base_off + size_helper()*HeapWordSize - (base_off > 0 ? first_field_offset() : 0); 292 sig_extended.push(SigEntry(T_VOID, offset)); // hack: use T_VOID to mark end of value type fields 293 if (base_off == 0) { 294 sig_extended.sort(SigEntry::compare); 295 } 296 assert(sig_extended.at(0)._bt == T_VALUETYPE && sig_extended.at(sig_extended.length()-1)._bt == T_VOID, "broken structure"); 297 return sig_extended; 298 } 299 300 // Returns the basic types and registers for fields to return an 301 // instance of this value type in registers if possible. 
GrowableArray<SigEntry> ValueKlass::return_convention(VMRegPair*& regs, int& nb_fields) const {
  assert(ValueTypeReturnedAsFields, "inconsistent");
  const GrowableArray<SigEntry>& sig_vk = collect_fields();
  // One extra slot for the leading T_METADATA entry (the klass pointer).
  nb_fields = SigEntry::count_fields(sig_vk)+1;
  BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nb_fields);
  sig_bt[0] = T_METADATA;
  SigEntry::fill_sig_bt(sig_vk, sig_bt+1, nb_fields-1, true);
  regs = NEW_RESOURCE_ARRAY(VMRegPair, nb_fields);
  int total = SharedRuntime::java_return_convention(sig_bt, regs, nb_fields);

  // total <= 0 means the fields cannot all be returned in registers;
  // signal that to callers with a NULL regs array.
  if (total <= 0) {
    regs = NULL;
  }

  return sig_vk;
}

// Create handles for all oop fields returned in registers that are
// going to be live across a safepoint.
// Returns true (after handlizing) only when this value is actually being
// returned as fields; returns false when it is returned as a reference.
bool ValueKlass::save_oop_results(RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  if (ValueTypeReturnedAsFields) {
    int nb_fields;
    VMRegPair* regs;
    const GrowableArray<SigEntry>& sig_vk = return_convention(regs, nb_fields);

    if (regs != NULL) {
      // Skip the leading T_METADATA slot: it holds the klass pointer, not a field.
      regs++;
      nb_fields--;
      save_oop_fields(sig_vk, reg_map, regs, handles, nb_fields);
      return true;
    }
  }
  return false;
}

// Same as above but with pre-computed return convention
void ValueKlass::save_oop_fields(const GrowableArray<SigEntry>& sig_vk, RegisterMap& reg_map, const VMRegPair* regs, GrowableArray<Handle>& handles, int nb_fields) const {
  int j = 0; // register-slot index into regs
  Thread* thread = Thread::current();
  for (int i = 0; i < sig_vk.length(); i++) {
    BasicType bt = sig_vk.at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      int off = sig_vk.at(i)._offset;
      VMRegPair pair = regs[j];
      address loc = reg_map.location(pair.first());
      oop v = *(oop*)loc;
      assert(v == NULL || v->is_oop(), "not an oop?");
      assert(Universe::heap()->is_in_or_null(v), "must be heap pointer");
      handles.push(Handle(thread, v));
    }
    if (bt == T_VALUETYPE) {
      // T_VALUETYPE delimiter entries consume no register slot.
      continue;
    }
    if (bt == T_VOID &&
        sig_vk.at(i-1)._bt != T_LONG &&
        sig_vk.at(i-1)._bt != T_DOUBLE) {
      // Closing T_VOID delimiter (as opposed to the second slot of a
      // long/double) consumes no register slot either.
      continue;
    }
    j++;
  }
  assert(j == nb_fields, "missed a field?");
}

// Update oop fields in registers from handles after a safepoint
void ValueKlass::restore_oop_results(RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  assert(ValueTypeReturnedAsFields, "inconsistent");
  int nb_fields;
  VMRegPair* regs;
  const GrowableArray<SigEntry>& sig_vk = return_convention(regs, nb_fields);
  assert(regs != NULL, "inconsistent");

  // Skip the leading T_METADATA slot, as in save_oop_results above.
  regs++;
  nb_fields--;

  int j = 0; // register-slot index; k indexes the saved handles
  for (int i = 0, k = 0; i < sig_vk.length(); i++) {
    BasicType bt = sig_vk.at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      int off = sig_vk.at(i)._offset;
      VMRegPair pair = regs[j];
      address loc = reg_map.location(pair.first());
      // Write the (possibly updated by GC) oop back into its register slot.
      *(oop*)loc = handles.at(k++)();
    }
    if (bt == T_VALUETYPE) {
      continue;
    }
    if (bt == T_VOID &&
        sig_vk.at(i-1)._bt != T_LONG &&
        sig_vk.at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(j == nb_fields, "missed a field?");
}

// Fields are in registers. Create an instance of the value type and
// initialize it with the values of the fields.
oop ValueKlass::realloc_result(const GrowableArray<SigEntry>& sig_vk, const RegisterMap& reg_map, const VMRegPair* regs,
                               const GrowableArray<Handle>& handles, int nb_fields, TRAPS) {
  oop new_vt = allocate_instance(CHECK_NULL);

  int j = 0; // register-slot index into regs
  int k = 0; // index into the handles of previously saved oop fields
  for (int i = 0; i < sig_vk.length(); i++) {
    BasicType bt = sig_vk.at(i)._bt;
    if (bt == T_VALUETYPE) {
      // Delimiter entry: no register slot, nothing to store.
      continue;
    }
    if (bt == T_VOID) {
      if (sig_vk.at(i-1)._bt == T_LONG ||
          sig_vk.at(i-1)._bt == T_DOUBLE) {
        // Second slot of a long/double: consumes a register slot but
        // the value was already stored when the first slot was seen.
        j++;
      }
      continue;
    }
    int off = sig_vk.at(i)._offset;
    VMRegPair pair = regs[j];
    address loc = reg_map.location(pair.first());
    // Sub-word values are read as a full intptr_t and truncated on store.
    switch(bt) {
    case T_BOOLEAN: {
      jboolean v = *(intptr_t*)loc;
      *(jboolean*)((address)new_vt + off) = v;
      break;
    }
    case T_CHAR: {
      jchar v = *(intptr_t*)loc;
      *(jchar*)((address)new_vt + off) = v;
      break;
    }
    case T_BYTE: {
      jbyte v = *(intptr_t*)loc;
      *(jbyte*)((address)new_vt + off) = v;
      break;
    }
    case T_SHORT: {
      jshort v = *(intptr_t*)loc;
      *(jshort*)((address)new_vt + off) = v;
      break;
    }
    case T_INT: {
      jint v = *(intptr_t*)loc;
      *(jint*)((address)new_vt + off) = v;
      break;
    }
    case T_LONG: {
#ifdef _LP64
      jlong v = *(intptr_t*)loc;
      *(jlong*)((address)new_vt + off) = v;
#else
      Unimplemented();
#endif
      break;
    }
    case T_OBJECT:
    case T_ARRAY: {
      // Oops come from the handles saved across the safepoint, not from
      // the raw register slot (which may hold a stale pointer).
      Handle handle = handles.at(k++);
      oop v = handle();
      if (!UseCompressedOops) {
        oop* p = (oop*)((address)new_vt + off);
        oopDesc::store_heap_oop(p, v);
      } else {
        narrowOop* p = (narrowOop*)((address)new_vt + off);
        oopDesc::encode_store_heap_oop(p, v);
      }
      break;
    }
    case T_FLOAT: {
      jfloat v = *(jfloat*)loc;
      *(jfloat*)((address)new_vt + off) = v;
      break;
    }
    case T_DOUBLE: {
      jdouble v = *(jdouble*)loc;
      *(jdouble*)((address)new_vt + off) = v;
      break;
    }
    default:
      ShouldNotReachHere();
    }
    j++;
  }
  assert(j == nb_fields, "missed a field?");
  assert(k == handles.length(), "missed an oop?");
  return new_vt;
}

// Returns the ValueKlass* held in the return register when a value type is
// returned as fields, or NULL when the register holds an ordinary heap oop
// (i.e. the value was returned as a reference instead).
ValueKlass* ValueKlass::returned_value_type(const RegisterMap& map) {
  BasicType bt = T_METADATA;
  VMRegPair pair;
  int nb = SharedRuntime::java_return_convention(&bt, &pair, 1);
  assert(nb == 1, "broken");

  address loc = map.location(pair.first());
  intptr_t ptr = *(intptr_t*)loc;
  // A pointer into the reserved heap range must be an oop, not a klass pointer.
  if (Universe::heap()->is_in_reserved((void*)ptr)) {
    return NULL;
  }
  return (ValueKlass*)ptr;
}