/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/metadataFactory.hpp"
#include "oops/access.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/valueKlass.inline.hpp"
#include "oops/valueArrayKlass.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"

// Constructor
ValueKlass::ValueKlass(const ClassFileParser& parser)
  : InstanceKlass(parser, InstanceKlass::_misc_kind_value_type, InstanceKlass::ID) {
  _adr_valueklass_fixed_block = valueklass_static_block();
  // Addresses used for value type calling convention
  *((Array<SigEntry>**)adr_extended_sig()) = NULL;
  *((Array<VMRegPair>**)adr_return_regs()) = NULL;
  *((address*)adr_pack_handler()) = NULL;
  *((address*)adr_unpack_handler()) = NULL;
  assert(pack_handler() == NULL, "pack handler not null");
  *((int*)adr_default_value_offset()) = 0;
  *((Klass**)adr_value_array_klass()) = NULL;
  set_prototype_header(markWord::always_locked_prototype());
}

oop ValueKlass::default_value() {
  oop val = java_mirror()->obj_field_acquire(default_value_offset());
  assert(oopDesc::is_oop(val), "Sanity check");
  assert(val->is_value(), "Sanity check");
  assert(val->klass() == this, "sanity check");
  return val;
}

int ValueKlass::first_field_offset_old() {
#ifdef ASSERT
  int first_offset = INT_MAX;
  for (AllFieldStream fs(this); !fs.done(); fs.next()) {
    if (fs.offset() < first_offset) first_offset = fs.offset();
  }
#endif
  int base_offset = instanceOopDesc::base_offset_in_bytes();
  // The first field of value types is aligned on a long boundary
  base_offset = align_up(base_offset, BytesPerLong);
  assert(base_offset == first_offset, "inconsistent offsets");
  return base_offset;
}

int ValueKlass::raw_value_byte_size() {
  int heapOopAlignedSize = nonstatic_field_size() << LogBytesPerHeapOop;
  // If bigger than 64 bits or needs oop alignment, then use the heapOop-aligned size,
  // which for values should be jlong aligned (raw_field_copy asserts otherwise).
  if (heapOopAlignedSize >= longSize || contains_oops()) {
    return heapOopAlignedSize;
  }
  // Small primitives...
  // If there are only a few small basic type fields, return the actual size, i.e.
  //   1 byte  = 1
  //   2 bytes = 2
  //   3 bytes = 4, because a power of two is needed for element stores
  int first_offset = first_field_offset();
  int last_offset = 0; // find the last offset, add basic type size
  int last_tsz = 0;
  for (AllFieldStream fs(this); !fs.done(); fs.next()) {
    if (fs.access_flags().is_static()) {
      continue;
    } else if (fs.offset() > last_offset) {
      BasicType type = char2type(fs.signature()->char_at(0));
      if (is_java_primitive(type)) {
        last_tsz = type2aelembytes(type);
      } else if (type == T_VALUETYPE) {
        // Not just primitives: the layout aligns embedded values, so use the jlong-aligned size
        return heapOopAlignedSize;
      } else {
        guarantee(0, "Unknown type %d", type);
      }
      assert(last_tsz != 0, "Invariant");
      last_offset = fs.offset();
    }
  }
  // Assumes value types with no fields are meaningless and illegal
  last_offset += last_tsz;
  assert(last_offset > first_offset && last_tsz, "Invariant");
  return 1 << upper_log2(last_offset - first_offset);
}

instanceOop ValueKlass::allocate_instance(TRAPS) {
  int size = size_helper(); // Query before forming handle.

  instanceOop oop = (instanceOop)Universe::heap()->obj_allocate(this, size, CHECK_NULL);
  assert(oop->mark().is_always_locked(), "Unlocked value type");
  return oop;
}

bool ValueKlass::is_atomic() {
  return (nonstatic_field_size() * heapOopSize) <= longSize;
}

int ValueKlass::nonstatic_oop_count() {
  int oops = 0;
  int map_count = nonstatic_oop_map_count();
  OopMapBlock* block = start_of_nonstatic_oop_maps();
  OopMapBlock* end = block + map_count;
  while (block != end) {
    oops += block->count();
    block++;
  }
  return oops;
}

// Arrays of...

bool ValueKlass::flatten_array() {
  if (!ValueArrayFlatten) {
    return false;
  }

  int elem_bytes = raw_value_byte_size();
  // Too big
  if ((ValueArrayElemMaxFlatSize >= 0) && (elem_bytes > ValueArrayElemMaxFlatSize)) {
    return false;
  }
  // Too many embedded oops
  if ((ValueArrayElemMaxFlatOops >= 0) && (nonstatic_oop_count() > ValueArrayElemMaxFlatOops)) {
    return false;
  }

  return true;
}


Klass* ValueKlass::array_klass_impl(ArrayStorageProperties storage_props, bool or_null, int n, TRAPS) {
  if (storage_props.is_null_free()) {
    return value_array_klass(storage_props, or_null, n, THREAD);
  } else {
    return InstanceKlass::array_klass_impl(storage_props, or_null, n, THREAD);
  }
}

Klass* ValueKlass::array_klass_impl(ArrayStorageProperties storage_props, bool or_null, TRAPS) {
  return array_klass_impl(storage_props, or_null, 1, THREAD);
}

Klass* ValueKlass::value_array_klass(ArrayStorageProperties storage_props, bool or_null, int rank, TRAPS) {
  Klass* vak = acquire_value_array_klass();
  if (vak == NULL) {
    if (or_null) return NULL;
    ResourceMark rm;
    {
      // Atomic creation of array_klasses
      MutexLocker ma(MultiArray_lock, THREAD);
      // Re-read under the lock: another thread may have created the array klass
      // while we were waiting, in which case vak must pick up its value here.
      vak = get_value_array_klass();
      if (vak == NULL) {
        vak = allocate_value_array_klass(CHECK_NULL);
        OrderAccess::release_store((Klass**)adr_value_array_klass(), vak);
      }
    }
  }
  if (!vak->is_valueArray_klass()) {
    storage_props.clear_flattened();
  }
  if (or_null) {
    return vak->array_klass_or_null(storage_props, rank);
  }
  return vak->array_klass(storage_props, rank, THREAD);
}

Klass* ValueKlass::allocate_value_array_klass(TRAPS) {
  if (flatten_array() && (is_atomic() || (!ValueArrayAtomicAccess))) {
    return ValueArrayKlass::allocate_klass(ArrayStorageProperties::flattened_and_null_free, this, THREAD);
  }
  return ObjArrayKlass::allocate_objArray_klass(ArrayStorageProperties::null_free, 1, this, THREAD);
}

void ValueKlass::array_klasses_do(void f(Klass* k)) {
  InstanceKlass::array_klasses_do(f);
  if (get_value_array_klass() != NULL)
    ArrayKlass::cast(get_value_array_klass())->array_klasses_do(f);
}

// Value type arguments are not passed by reference, instead each
// field of the value type is passed as an argument. This helper
// function collects the fields of the value type (including embedded
// value types' fields) in a list. Included with the field's type is
// the offset of each field in the value type: i2c and c2i adapters
// need that to load or store fields. Finally, the list of fields is
// sorted in order of increasing offsets: the adapters and the
// compiled code need to agree upon the order of fields.
//
// The list of basic types that is returned starts with a T_VALUETYPE
// and ends with an extra T_VOID. T_VALUETYPE/T_VOID pairs are used as
// delimiters. Every entry between the two is a field of the value
// type. If there's an embedded value type in the list, it also starts
// with a T_VALUETYPE and ends with a T_VOID. This is so we can
// generate a unique fingerprint for the method's adapters and we can
// generate the list of basic types from the interpreter point of view
// (value types passed as reference: iterate on the list until a
// T_VALUETYPE, drop everything until and including the closing
// T_VOID) or the compiler point of view (each field of the value
// type is an argument: drop all T_VALUETYPE/T_VOID from the list).
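//
// Illustrative example (hypothetical value type and field offsets, only meant to
// clarify the encoding described above; real offsets come from the field layouter):
// for a value type Point with two int fields x and y, collect_fields() could produce
//
//   T_VALUETYPE(0), T_INT(off_x), T_INT(off_y), T_VOID(instance_size)
//
// A holder with a flattened Point field would contribute a nested block for it,
// i.e. another T_VALUETYPE ... T_VOID pair enclosing Point's fields, with offsets
// rebased to the holder's layout.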
int ValueKlass::collect_fields(GrowableArray<SigEntry>* sig, int base_off) {
  int count = 0;
  SigEntry::add_entry(sig, T_VALUETYPE, base_off);
  for (AllFieldStream fs(this); !fs.done(); fs.next()) {
    if (fs.access_flags().is_static()) continue;
    int offset = base_off + fs.offset() - (base_off > 0 ? first_field_offset() : 0);
    if (fs.is_flattened()) {
      // Resolve klass of flattened value type field and recursively collect fields
      Klass* vk = get_value_field_klass(fs.index());
      count += ValueKlass::cast(vk)->collect_fields(sig, offset);
    } else {
      BasicType bt = FieldType::basic_type(fs.signature());
      if (bt == T_VALUETYPE) {
        bt = T_OBJECT;
      }
      SigEntry::add_entry(sig, bt, offset);
      count += type2size[bt];
    }
  }
  int offset = base_off + size_helper()*HeapWordSize - (base_off > 0 ? first_field_offset() : 0);
  SigEntry::add_entry(sig, T_VOID, offset);
  if (base_off == 0) {
    sig->sort(SigEntry::compare);
  }
  assert(sig->at(0)._bt == T_VALUETYPE && sig->at(sig->length()-1)._bt == T_VOID, "broken structure");
  return count;
}

void ValueKlass::initialize_calling_convention(TRAPS) {
  // Because the pack and unpack handler addresses need to be loadable from generated code,
  // they are stored at a fixed offset in the klass metadata. Since value type klasses do
  // not have a vtable, the vtable offset is used to store these addresses.
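  //
  // (Sketch, for orientation only: the extended signature and the return-register
  // array describe how this value type's fields map onto argument/return registers;
  // the pack and unpack handlers generated below are understood to be small stubs
  // that convert between that scalarized register form and a buffered heap instance,
  // see SharedRuntime::generate_buffered_value_type_adapter().)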
  if (is_scalarizable() && (ValueTypeReturnedAsFields || ValueTypePassFieldsAsArgs)) {
    ResourceMark rm;
    GrowableArray<SigEntry> sig_vk;
    int nb_fields = collect_fields(&sig_vk);
    Array<SigEntry>* extended_sig = MetadataFactory::new_array<SigEntry>(class_loader_data(), sig_vk.length(), CHECK);
    *((Array<SigEntry>**)adr_extended_sig()) = extended_sig;
    for (int i = 0; i < sig_vk.length(); i++) {
      extended_sig->at_put(i, sig_vk.at(i));
    }

    if (ValueTypeReturnedAsFields) {
      nb_fields++;
      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nb_fields);
      sig_bt[0] = T_METADATA;
      SigEntry::fill_sig_bt(&sig_vk, sig_bt+1);
      VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, nb_fields);
      int total = SharedRuntime::java_return_convention(sig_bt, regs, nb_fields);

      if (total > 0) {
        Array<VMRegPair>* return_regs = MetadataFactory::new_array<VMRegPair>(class_loader_data(), nb_fields, CHECK);
        *((Array<VMRegPair>**)adr_return_regs()) = return_regs;
        for (int i = 0; i < nb_fields; i++) {
          return_regs->at_put(i, regs[i]);
        }

        BufferedValueTypeBlob* buffered_blob = SharedRuntime::generate_buffered_value_type_adapter(this);
        *((address*)adr_pack_handler()) = buffered_blob->pack_fields();
        *((address*)adr_unpack_handler()) = buffered_blob->unpack_fields();
        assert(CodeCache::find_blob(pack_handler()) == buffered_blob, "lost track of blob");
      }
    }
  }
}

void ValueKlass::deallocate_contents(ClassLoaderData* loader_data) {
  if (extended_sig() != NULL) {
    MetadataFactory::free_array<SigEntry>(loader_data, extended_sig());
  }
  if (return_regs() != NULL) {
    MetadataFactory::free_array<VMRegPair>(loader_data, return_regs());
  }
  cleanup_blobs();
  InstanceKlass::deallocate_contents(loader_data);
}

void ValueKlass::cleanup(ValueKlass* ik) {
  ik->cleanup_blobs();
}

void ValueKlass::cleanup_blobs() {
  if (pack_handler() != NULL) {
    CodeBlob* buffered_blob = CodeCache::find_blob(pack_handler());
    assert(buffered_blob->is_buffered_value_type_blob(), "bad blob type");
    BufferBlob::free((BufferBlob*)buffered_blob);
    *((address*)adr_pack_handler()) = NULL;
    *((address*)adr_unpack_handler()) = NULL;
  }
}

// Can this value type be scalarized?
bool ValueKlass::is_scalarizable() const {
  return ScalarizeValueTypes;
}

// Can this value type be returned as multiple values?
bool ValueKlass::can_be_returned_as_fields() const {
  return return_regs() != NULL;
}
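
// Note on the loops in save_oop_fields(), restore_oop_results() and realloc_result()
// below: they walk the extended signature in step with the return-register array.
// 'j' starts at 1 because regs->at(0) is the slot reserved for the tagged ValueKlass*
// (see returned_value_klass()). T_VALUETYPE entries are delimiters and consume no
// register; a T_VOID entry consumes a register only when it marks the second half of
// a preceding T_LONG or T_DOUBLE.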

// Create handles for all oop fields returned in registers that are going to be live across a safepoint
void ValueKlass::save_oop_fields(const RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  Thread* thread = Thread::current();
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
  int j = 1;

  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first());
      oop v = *(oop*)loc;
      assert(v == NULL || oopDesc::is_oop(v), "not an oop?");
      assert(Universe::heap()->is_in_or_null(v), "must be heap pointer");
      handles.push(Handle(thread, v));
    }
    if (bt == T_VALUETYPE) {
      continue;
    }
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
}

// Update oop fields in registers from handles after a safepoint
void ValueKlass::restore_oop_results(RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  assert(ValueTypeReturnedAsFields, "inconsistent");
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
  assert(regs != NULL, "inconsistent");

  int j = 1;
  for (int i = 0, k = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first());
      *(oop*)loc = handles.at(k++)();
    }
    if (bt == T_VALUETYPE) {
      continue;
    }
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
}

// Fields are in registers. Create an instance of the value type and
// initialize it with the values of the fields.
oop ValueKlass::realloc_result(const RegisterMap& reg_map, const GrowableArray<Handle>& handles, TRAPS) {
  oop new_vt = allocate_instance(CHECK_NULL);
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();

  int j = 1;
  int k = 0;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_VALUETYPE) {
      continue;
    }
    if (bt == T_VOID) {
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    int off = sig_vk->at(i)._offset;
    assert(off > 0, "offset in object should be positive");
    VMRegPair pair = regs->at(j);
    address loc = reg_map.location(pair.first());
    switch(bt) {
    case T_BOOLEAN: {
      new_vt->bool_field_put(off, *(jboolean*)loc);
      break;
    }
    case T_CHAR: {
      new_vt->char_field_put(off, *(jchar*)loc);
      break;
    }
    case T_BYTE: {
      new_vt->byte_field_put(off, *(jbyte*)loc);
      break;
    }
    case T_SHORT: {
      new_vt->short_field_put(off, *(jshort*)loc);
      break;
    }
    case T_INT: {
      new_vt->int_field_put(off, *(jint*)loc);
      break;
    }
    case T_LONG: {
#ifdef _LP64
      new_vt->double_field_put(off, *(jdouble*)loc);
#else
      Unimplemented();
#endif
      break;
    }
    case T_OBJECT:
    case T_ARRAY: {
      Handle handle = handles.at(k++);
      new_vt->obj_field_put(off, handle());
      break;
    }
    case T_FLOAT: {
      new_vt->float_field_put(off, *(jfloat*)loc);
      break;
    }
    case T_DOUBLE: {
      new_vt->double_field_put(off, *(jdouble*)loc);
      break;
    }
    default:
      ShouldNotReachHere();
    }
    *(intptr_t*)loc = 0xDEAD;
    j++;
  }
  assert(j == regs->length(), "missed a field?");
  assert(k == handles.length(), "missed an oop?");
  return new_vt;
}

// Check the return register for a tagged ValueKlass pointer
ValueKlass* ValueKlass::returned_value_klass(const RegisterMap& map) {
  BasicType bt = T_METADATA;
  VMRegPair pair;
  int nb = SharedRuntime::java_return_convention(&bt, &pair, 1);
  assert(nb == 1, "broken");

  address loc = map.location(pair.first());
  intptr_t ptr = *(intptr_t*)loc;
  if (is_set_nth_bit(ptr, 0)) {
    // Return value is tagged, must be a ValueKlass pointer
    clear_nth_bit(ptr, 0);
    assert(Metaspace::contains((void*)ptr), "should be klass");
    ValueKlass* vk = (ValueKlass*)ptr;
    assert(vk->can_be_returned_as_fields(), "must be able to return as fields");
    return vk;
  }
#ifdef ASSERT
  // Return value is not tagged, must be a valid oop
  if (VerifyOops) {
    oopDesc::verify(oop((HeapWord*)ptr));
  }
#endif
  return NULL;
}

void ValueKlass::verify_on(outputStream* st) {
  InstanceKlass::verify_on(st);
  guarantee(prototype_header().is_always_locked(), "Prototype header is not always locked");
}

void ValueKlass::oop_verify_on(oop obj, outputStream* st) {
  InstanceKlass::oop_verify_on(obj, st);
  guarantee(obj->mark().is_always_locked(), "Header is not always locked");
}