/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/metadataFactory.hpp"
#include "oops/access.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/valueKlass.hpp"
#include "oops/valueArrayKlass.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"

int ValueKlass::first_field_offset() const {
#ifdef ASSERT
  int first_offset = INT_MAX;
  for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
    if (fs.offset() < first_offset) first_offset = fs.offset();
  }
#endif
  int base_offset = instanceOopDesc::base_offset_in_bytes();
  // The first field of a value type is aligned on a long boundary
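  // (e.g., assuming compressed class pointers on a 64-bit VM, a 12 byte header rounds up to offset 16)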
  base_offset = align_up(base_offset, BytesPerLong);
  assert(base_offset == first_offset, "inconsistent offsets");
  return base_offset;
}

int ValueKlass::raw_value_byte_size() const {
  int heapOopAlignedSize = nonstatic_field_size() << LogBytesPerHeapOop;
  // If the payload is larger than 64 bits or contains oops, use the heap-oop-aligned
  // size, which for values is also jlong aligned (raw_field_copy asserts this).
  if (heapOopAlignedSize >= longSize || contains_oops()) {
    return heapOopAlignedSize;
  }
  // Small primitives only: return the actual size rounded up to a power of two,
  // since array element stores require a pow2 size, i.e.
  //   1 byte  -> 1
  //   2 bytes -> 2
  //   3 bytes -> 4
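  // (e.g., hypothetically, byte fields at offsets 16, 17 and 18 span 3 bytes from
  // the first field offset, which rounds up to a raw size of 4)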
  int first_offset = first_field_offset();
  int last_offset = 0; // find the last offset, add basic type size
  int last_tsz = 0;
  for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
    if (fs.access_flags().is_static()) {
      continue;
    } else if (fs.offset() > last_offset) {
      BasicType type = fs.field_descriptor().field_type();
      if (is_java_primitive(type)) {
        last_tsz = type2aelembytes(type);
      } else if (type == T_VALUETYPE) {
        // Not only primitives: the layout aligns embedded values, so use the jlong-aligned size
        return heapOopAlignedSize;
      } else {
        guarantee(0, "Unknown type %d", type);
      }
      assert(last_tsz != 0, "Invariant");
      last_offset = fs.offset();
    }
  }
  // Assumes value types with no fields are meaningless and illegal
  last_offset += last_tsz;
  assert(last_offset > first_offset && last_tsz != 0, "Invariant");
  return 1 << upper_log2(last_offset - first_offset);
}

instanceOop ValueKlass::allocate_instance(TRAPS) {
  int size = size_helper(); // Query before forming handle.

  instanceOop oop = (instanceOop)Universe::heap()->obj_allocate(this, size, CHECK_NULL);
  assert(oop->mark()->is_always_locked(), "Unlocked value type");
  return oop;
}

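// A value is atomic if its flattened payload fits in a single 64-bit word,
// so loads and stores of the whole value cannot tear.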
bool ValueKlass::is_atomic() {
  return (nonstatic_field_size() * heapOopSize) <= longSize;
}

int ValueKlass::nonstatic_oop_count() {
  int oops = 0;
  int map_count = nonstatic_oop_map_count();
  OopMapBlock* block = start_of_nonstatic_oop_maps();
  OopMapBlock* end = block + map_count;
  while (block != end) {
    oops += block->count();
    block++;
  }
  return oops;
}

// Arrays of...

bool ValueKlass::flatten_array() {
  if (!ValueArrayFlatten) {
    return false;
  }

  int elem_bytes = raw_value_byte_size();
  // Too big
  if ((ValueArrayElemMaxFlatSize >= 0) && (elem_bytes > ValueArrayElemMaxFlatSize)) {
    return false;
  }
  // Too many embedded oops
  if ((ValueArrayElemMaxFlatOops >= 0) && (nonstatic_oop_count() > ValueArrayElemMaxFlatOops)) {
    return false;
  }

  return true;
}


Klass* ValueKlass::array_klass_impl(ArrayStorageProperties storage_props, bool or_null, int n, TRAPS) {
  if (storage_props.is_null_free()) {
    return value_array_klass(storage_props, or_null, n, THREAD);
  } else {
    return InstanceKlass::array_klass_impl(storage_props, or_null, n, THREAD);
  }
}

Klass* ValueKlass::array_klass_impl(ArrayStorageProperties storage_props, bool or_null, TRAPS) {
  return array_klass_impl(storage_props, or_null, 1, THREAD);
}

Klass* ValueKlass::value_array_klass(ArrayStorageProperties storage_props, bool or_null, int rank, TRAPS) {
  Klass* vak = acquire_value_array_klass();
  if (vak == NULL) {
    if (or_null) return NULL;
    ResourceMark rm;
    {
      // Atomic creation of array_klasses: re-read under the lock in case
      // another thread created the klass while we were waiting.
      MutexLocker ma(MultiArray_lock, THREAD);
      vak = get_value_array_klass();
      if (vak == NULL) {
        vak = allocate_value_array_klass(CHECK_NULL);
        OrderAccess::release_store((Klass**)adr_value_array_klass(), vak);
      }
    }
  }
  if (!vak->is_valueArray_klass()) {
    storage_props.clear_flattened();
  }
  if (or_null) {
    return vak->array_klass_or_null(storage_props, rank);
  }
  return vak->array_klass(storage_props, rank, THREAD);
}

Klass* ValueKlass::allocate_value_array_klass(TRAPS) {
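  // Use a flattened array layout only if the flattening heuristics allow it and,
  // when atomic element access is required, the value's payload fits in 64 bits.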
  if (flatten_array() && (is_atomic() || (!ValueArrayAtomicAccess))) {
    return ValueArrayKlass::allocate_klass(ArrayStorageProperties::flattened_and_null_free, this, THREAD);
  }
  return ObjArrayKlass::allocate_objArray_klass(ArrayStorageProperties::null_free, 1, this, THREAD);
}

void ValueKlass::array_klasses_do(void f(Klass* k)) {
  InstanceKlass::array_klasses_do(f);
  if (get_value_array_klass() != NULL)
    ArrayKlass::cast(get_value_array_klass())->array_klasses_do(f);
}

void ValueKlass::raw_field_copy(void* src, void* dst, size_t raw_byte_size) {
  /*
   * Try not to shear fields even if not an atomic store...
   *
   * The first three cases handle value array stores; otherwise this works on the
   * same basis as JVM_Clone, since at this size the data is aligned. Primitive
   * types are laid out largest to smallest, so it is not possible for fields to
   * straddle long copy boundaries.
   *
   * With concurrent writers and no exclusive access, a partial value store may be
   * observed, but never partial primitive or reference field values.
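   *
   * e.g. a 16 byte payload is copied as two atomic jlong stores: each 64-bit
   * half is observed whole even though the value as a unit may tear.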
   */
  switch (raw_byte_size) {
    case 1:
      *((jbyte*) dst) = *(jbyte*)src;
      break;
    case 2:
      *((jshort*) dst) = *(jshort*)src;
      break;
    case 4:
      *((jint*) dst) = *(jint*) src;
      break;
    default:
      assert(raw_byte_size % sizeof(jlong) == 0, "Unaligned raw_byte_size");
      Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst, raw_byte_size >> LogBytesPerLong);
  }
}

/*
 * Store the value of this klass contained in src into dst.
 *
 * This operation is appropriate for use from vastore, vaload and putfield (for values).
 *
 * GC barriers currently can lock with no safepoint check and allocate c-heap,
 * so raw pointers are "safe" for now.
 *
 * Going forward, look to use a machine-generated version (stub gen or bytecode)
 * for the most used klass layouts.
 *
 */
void ValueKlass::value_store(void* src, void* dst, size_t raw_byte_size, bool dst_heap, bool dst_uninitialized) {
  if (contains_oops()) {
    if (dst_heap) {
      // src/dst aren't oops; we need this offset to adjust to the oop map offsets
      const address dst_oop_addr = ((address) dst) - first_field_offset();

      ModRefBarrierSet* bs = barrier_set_cast<ModRefBarrierSet>(BarrierSet::barrier_set());

      // Pre-barriers...
      OopMapBlock* map = start_of_nonstatic_oop_maps();
      OopMapBlock* const end = map + nonstatic_oop_map_count();
      while (map != end) {
        // Shame we can't just use the existing oop iterator... src/dst aren't oops
        address doop_address = dst_oop_addr + map->offset();
        // TEMP HACK: the barrier code needs to migrate to the access API (with its own versions of the value type ops)
        if (UseCompressedOops) {
          bs->write_ref_array_pre((narrowOop*) doop_address, map->count(), dst_uninitialized);
        } else {
          bs->write_ref_array_pre((oop*) doop_address, map->count(), dst_uninitialized);
        }
        map++;
      }

      raw_field_copy(src, dst, raw_byte_size);

      // Post-barriers...
      map = start_of_nonstatic_oop_maps();
      while (map != end) {
        address doop_address = dst_oop_addr + map->offset();
        bs->write_ref_array((HeapWord*) doop_address, map->count());
        map++;
      }
    } else { // Buffered value case
      raw_field_copy(src, dst, raw_byte_size);
    }
  } else { // Primitive-only case...
    raw_field_copy(src, dst, raw_byte_size);
  }
}

// Value type arguments are not passed by reference. Instead, each field of the
// value type is passed as an argument. This helper function collects the fields
// of the value type (including the fields of embedded value types) in a list.
// Included with each field's type is its offset in the value type: the i2c and
// c2i adapters need that to load or store fields. Finally, the list of fields is
// sorted in order of increasing offsets: the adapters and the compiled code need
// to agree upon the order of fields.
//
// The list of basic types that is returned starts with a T_VALUETYPE and ends
// with an extra T_VOID. T_VALUETYPE/T_VOID pairs are used as delimiters. Every
// entry between the two is a field of the value type. If there's an embedded
// value type in the list, it also starts with a T_VALUETYPE and ends with a
// T_VOID. This way we can generate a unique fingerprint for the method's
// adapters, and we can generate the list of basic types from the interpreter
// point of view (value types passed as reference: iterate over the list until a
// T_VALUETYPE, drop everything up to and including the closing T_VOID) or from
// the compiler point of view (each field of the value type is an argument: drop
// all T_VALUETYPE/T_VOID entries from the list).
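//
// For example (hypothetical), a value type with two int fields yields the list
// T_VALUETYPE, T_INT, T_INT, T_VOID: the interpreter view drops the whole run,
// while the compiler view keeps just the two T_INT entries.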
int ValueKlass::collect_fields(GrowableArray<SigEntry>* sig, int base_off) const {
  int count = 0;
  SigEntry::add_entry(sig, T_VALUETYPE, base_off);
  for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
    if (fs.access_flags().is_static()) continue;
    int offset = base_off + fs.offset() - (base_off > 0 ? first_field_offset() : 0);
    if (fs.is_flattened()) {
      // Resolve klass of flattened value type field and recursively collect fields
      Klass* vk = get_value_field_klass(fs.index());
      count += ValueKlass::cast(vk)->collect_fields(sig, offset);
    } else {
      BasicType bt = FieldType::basic_type(fs.signature());
      if (bt == T_VALUETYPE) {
        bt = T_OBJECT;
      }
      SigEntry::add_entry(sig, bt, offset);
      count += type2size[bt];
    }
  }
  int offset = base_off + size_helper()*HeapWordSize - (base_off > 0 ? first_field_offset() : 0);
  SigEntry::add_entry(sig, T_VOID, offset);
  if (base_off == 0) {
    sig->sort(SigEntry::compare);
  }
  assert(sig->at(0)._bt == T_VALUETYPE && sig->at(sig->length()-1)._bt == T_VOID, "broken structure");
  return count;
}

void ValueKlass::initialize_calling_convention(TRAPS) {
  // Because the pack and unpack handler addresses need to be loadable from generated code,
  // they are stored at a fixed offset in the klass metadata. Since value type klasses do
  // not have a vtable, the vtable offset is used to store these addresses.
  if (is_scalarizable() && (ValueTypeReturnedAsFields || ValueTypePassFieldsAsArgs)) {
    ResourceMark rm;
    GrowableArray<SigEntry> sig_vk;
    int nb_fields = collect_fields(&sig_vk);
    Array<SigEntry>* extended_sig = MetadataFactory::new_array<SigEntry>(class_loader_data(), sig_vk.length(), CHECK);
    *((Array<SigEntry>**)adr_extended_sig()) = extended_sig;
    for (int i = 0; i < sig_vk.length(); i++) {
      extended_sig->at_put(i, sig_vk.at(i));
    }

    if (ValueTypeReturnedAsFields) {
      nb_fields++;
      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nb_fields);
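      // Slot 0 carries the ValueKlass itself, returned tagged in a register so the
      // caller can identify the value type (see returned_value_klass below)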
      sig_bt[0] = T_METADATA;
      SigEntry::fill_sig_bt(&sig_vk, sig_bt+1);
      VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, nb_fields);
      int total = SharedRuntime::java_return_convention(sig_bt, regs, nb_fields);

      if (total > 0) {
        Array<VMRegPair>* return_regs = MetadataFactory::new_array<VMRegPair>(class_loader_data(), nb_fields, CHECK);
        *((Array<VMRegPair>**)adr_return_regs()) = return_regs;
        for (int i = 0; i < nb_fields; i++) {
          return_regs->at_put(i, regs[i]);
        }

        BufferedValueTypeBlob* buffered_blob = SharedRuntime::generate_buffered_value_type_adapter(this);
        *((address*)adr_pack_handler()) = buffered_blob->pack_fields();
        *((address*)adr_unpack_handler()) = buffered_blob->unpack_fields();
        assert(CodeCache::find_blob(pack_handler()) == buffered_blob, "lost track of blob");
      }
    }
  }
}

void ValueKlass::deallocate_contents(ClassLoaderData* loader_data) {
  if (extended_sig() != NULL) {
    MetadataFactory::free_array<SigEntry>(loader_data, extended_sig());
  }
  if (return_regs() != NULL) {
    MetadataFactory::free_array<VMRegPair>(loader_data, return_regs());
  }
  cleanup_blobs();
  InstanceKlass::deallocate_contents(loader_data);
}

void ValueKlass::cleanup(ValueKlass* ik) {
  ik->cleanup_blobs();
}

void ValueKlass::cleanup_blobs() {
  if (pack_handler() != NULL) {
    CodeBlob* buffered_blob = CodeCache::find_blob(pack_handler());
    assert(buffered_blob->is_buffered_value_type_blob(), "bad blob type");
    BufferBlob::free((BufferBlob*)buffered_blob);
    *((address*)adr_pack_handler()) = NULL;
    *((address*)adr_unpack_handler()) = NULL;
  }
}

// Can this value type be scalarized?
bool ValueKlass::is_scalarizable() const {
  return ScalarizeValueTypes;
}

// Can this value type be returned as multiple values?
bool ValueKlass::can_be_returned_as_fields() const {
  return return_regs() != NULL;
}

// Create handles for all oop fields returned in registers that are going to be live across a safepoint
void ValueKlass::save_oop_fields(const RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  Thread* thread = Thread::current();
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
  int j = 1;

  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first());
      oop v = *(oop*)loc;
      assert(v == NULL || oopDesc::is_oop(v), "not an oop?");
      assert(Universe::heap()->is_in_or_null(v), "must be heap pointer");
      handles.push(Handle(thread, v));
    }
    if (bt == T_VALUETYPE) {
      continue;
    }
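    // A T_VOID that follows a T_LONG or T_DOUBLE is the second half of that pair
    // and consumes its own register slot; any other T_VOID is a value type delimiter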
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
}

// Update oop fields in registers from handles after a safepoint
void ValueKlass::restore_oop_results(RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  assert(ValueTypeReturnedAsFields, "inconsistent");
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
  assert(regs != NULL, "inconsistent");

  int j = 1;
  for (int i = 0, k = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first());
      *(oop*)loc = handles.at(k++)();
    }
    if (bt == T_VALUETYPE) {
      continue;
    }
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
}

// Fields are in registers. Create an instance of the value type and
// initialize it with the values of the fields.
oop ValueKlass::realloc_result(const RegisterMap& reg_map, const GrowableArray<Handle>& handles, TRAPS) {
  oop new_vt = allocate_instance(CHECK_NULL);
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();

  int j = 1;
  int k = 0;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_VALUETYPE) {
      continue;
    }
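    // The T_VOID second half of a long/double occupies its own register slot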
    if (bt == T_VOID) {
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    int off = sig_vk->at(i)._offset;
    assert(off > 0, "offset in object should be positive");
    VMRegPair pair = regs->at(j);
    address loc = reg_map.location(pair.first());
    switch (bt) {
      case T_BOOLEAN: {
        new_vt->bool_field_put(off, *(jboolean*)loc);
        break;
      }
      case T_CHAR: {
        new_vt->char_field_put(off, *(jchar*)loc);
        break;
      }
      case T_BYTE: {
        new_vt->byte_field_put(off, *(jbyte*)loc);
        break;
      }
      case T_SHORT: {
        new_vt->short_field_put(off, *(jshort*)loc);
        break;
      }
      case T_INT: {
        new_vt->int_field_put(off, *(jint*)loc);
        break;
      }
      case T_LONG: {
#ifdef _LP64
        new_vt->long_field_put(off, *(jlong*)loc);
#else
        Unimplemented();
#endif
        break;
      }
      case T_OBJECT:
      case T_ARRAY: {
        Handle handle = handles.at(k++);
        new_vt->obj_field_put(off, handle());
        break;
      }
      case T_FLOAT: {
        new_vt->float_field_put(off, *(jfloat*)loc);
        break;
      }
      case T_DOUBLE: {
        new_vt->double_field_put(off, *(jdouble*)loc);
        break;
      }
      default:
        ShouldNotReachHere();
    }
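    // Poison the consumed location to catch accidental stale reads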
    *(intptr_t*)loc = 0xDEAD;
    j++;
  }
  assert(j == regs->length(), "missed a field?");
  assert(k == handles.length(), "missed an oop?");
  return new_vt;
}

// Check the return register for a tagged ValueKlass pointer
ValueKlass* ValueKlass::returned_value_klass(const RegisterMap& map) {
  BasicType bt = T_METADATA;
  VMRegPair pair;
  int nb = SharedRuntime::java_return_convention(&bt, &pair, 1);
  assert(nb == 1, "broken");

  address loc = map.location(pair.first());
  intptr_t ptr = *(intptr_t*)loc;
  if (is_set_nth_bit(ptr, 0)) {
    // The low bit is set: the register holds a tagged ValueKlass pointer, not an oop
    clear_nth_bit(ptr, 0);
    assert(Metaspace::contains((void*)ptr), "should be klass");
    ValueKlass* vk = (ValueKlass*)ptr;
    assert(vk->can_be_returned_as_fields(), "must be able to return as fields");
    return vk;
  }
#ifdef ASSERT
  // Not tagged: must be a valid oop
  if (VerifyOops) {
    oopDesc::verify(oop((HeapWord*)ptr));
  }
#endif
  return NULL;
}

void ValueKlass::verify_on(outputStream* st) {
  InstanceKlass::verify_on(st);
  guarantee(prototype_header()->is_always_locked(), "Prototype header is not always locked");
}

void ValueKlass::oop_verify_on(oop obj, outputStream* st) {
  InstanceKlass::oop_verify_on(obj, st);
  guarantee(obj->mark()->is_always_locked(), "Header is not always locked");
}