/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "ci/ciUtilities.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "jfr/support/jfrIntrinsics.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callGenerator.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/countbitsnode.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/idealKit.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/movenode.hpp"
#include "opto/mulnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "opto/rootnode.hpp"
#include "opto/subnode.hpp"
#include "opto/valuetypenode.hpp"
#include "prims/nativeLookup.hpp"
#include "prims/unsafe.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"


class LibraryIntrinsic : public InlineCallGenerator {
  // Extend the set of intrinsics known to the runtime:
 public:
 private:
  bool _is_virtual;
  bool _does_virtual_dispatch;
  int8_t _predicates_count;  // Intrinsic is predicated by several conditions
  int8_t _last_predicate;    // Last generated predicate
  vmIntrinsics::ID _intrinsic_id;

 public:
  LibraryIntrinsic(ciMethod* m, bool is_virtual, int predicates_count, bool does_virtual_dispatch, vmIntrinsics::ID id)
    : InlineCallGenerator(m),
      _is_virtual(is_virtual),
      _does_virtual_dispatch(does_virtual_dispatch),
      _predicates_count((int8_t)predicates_count),
      _last_predicate((int8_t)-1),
      _intrinsic_id(id)
  {
  }
  virtual bool is_intrinsic() const { return true; }
  virtual bool is_virtual() const { return _is_virtual; }
  virtual bool is_predicated() const { return _predicates_count > 0; }
  virtual int predicates_count() const { return _predicates_count; }
  virtual bool does_virtual_dispatch() const { return _does_virtual_dispatch; }
  virtual JVMState* generate(JVMState* jvms);
  virtual Node* generate_predicate(JVMState* jvms, int predicate);
  vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; }
};


// Local helper class for LibraryIntrinsic:
class LibraryCallKit : public GraphKit {
 private:
  LibraryIntrinsic* _intrinsic;   // the library intrinsic being called
  Node* _result;                  // the result node, if any
  int _reexecute_sp;              // the stack pointer when bytecode needs to be reexecuted

  const TypeOopPtr* sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type);

 public:
  LibraryCallKit(JVMState* jvms, LibraryIntrinsic* intrinsic)
    : GraphKit(jvms),
      _intrinsic(intrinsic),
      _result(NULL)
  {
    // Check if this is a root compile. In that case we don't have a caller.
    if (!jvms->has_method()) {
      _reexecute_sp = sp();
    } else {
      // Find out how many arguments the interpreter needs when deoptimizing
      // and save the stack pointer value so it can be used by uncommon_trap.
      // We find the argument count by looking at the declared signature.
      bool ignored_will_link;
      ciSignature* declared_signature = NULL;
      ciMethod* ignored_callee = caller()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
      const int nargs = declared_signature->arg_size_for_bc(caller()->java_code_at_bci(bci()));
      _reexecute_sp = sp() + nargs;  // "push" arguments back on stack
    }
  }
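
  // Example (illustrative numbers only): for an invokevirtual of
  // String.indexOf(String), the declared signature contributes one argument
  // slot plus the receiver, so arg_size_for_bc() returns 2. If sp() were 5 at
  // this point, _reexecute_sp would be 7, i.e. the stack depth with both
  // argument slots conceptually pushed back, which is what uncommon_trap()
  // needs in order to re-execute the invoke bytecode in the interpreter.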

  virtual LibraryCallKit* is_LibraryCallKit() const { return (LibraryCallKit*)this; }

  ciMethod* caller() const { return jvms()->method(); }
  int bci() const { return jvms()->bci(); }
  LibraryIntrinsic* intrinsic() const { return _intrinsic; }
  vmIntrinsics::ID intrinsic_id() const { return _intrinsic->intrinsic_id(); }
  ciMethod* callee() const { return _intrinsic->method(); }

  bool try_to_inline(int predicate);
  Node* try_to_predicate(int predicate);

  void push_result() {
    // Push the result onto the stack.
    if (!stopped() && result() != NULL) {
      BasicType bt = result()->bottom_type()->basic_type();
      push_node(bt, result());
    }
  }

 private:
  void fatal_unexpected_iid(vmIntrinsics::ID iid) {
    fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
  }

  void set_result(Node* n) { assert(_result == NULL, "only set once"); _result = n; }
  void set_result(RegionNode* region, PhiNode* value);
  Node* result() { return _result; }

  virtual int reexecute_sp() { return _reexecute_sp; }

  // Helper functions to inline natives
  Node* generate_guard(Node* test, RegionNode* region, float true_prob);
  Node* generate_slow_guard(Node* test, RegionNode* region);
  Node* generate_fair_guard(Node* test, RegionNode* region);
  Node* generate_negative_guard(Node* index, RegionNode* region,
                                // resulting CastII of index:
                                Node* *pos_index = NULL);
  Node* generate_limit_guard(Node* offset, Node* subseq_length,
                             Node* array_length,
                             RegionNode* region);
  void generate_string_range_check(Node* array, Node* offset,
                                   Node* length, bool char_count);
  Node* generate_current_thread(Node* &tls_output);
  Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null,
                                      RegionNode* region, int null_path,
                                      int offset);
  Node* load_klass_from_mirror(Node* mirror, bool never_see_null,
                               RegionNode* region, int null_path) {
    int offset = java_lang_Class::klass_offset_in_bytes();
    return load_klass_from_mirror_common(mirror, never_see_null,
                                         region, null_path,
                                         offset);
  }
  Node* load_array_klass_from_mirror(Node* mirror, bool never_see_null,
                                     RegionNode* region, int null_path) {
    int offset = java_lang_Class::array_klass_offset_in_bytes();
    return load_klass_from_mirror_common(mirror, never_see_null,
                                         region, null_path,
                                         offset);
  }
  Node* generate_access_flags_guard(Node* kls,
                                    int modifier_mask, int modifier_bits,
                                    RegionNode* region);
  Node* generate_interface_guard(Node* kls, RegionNode* region);
  Node* generate_value_guard(Node* kls, RegionNode* region);

  enum ArrayKind {
    AnyArray,
    NonArray,
    ObjectArray,
    NonObjectArray,
    TypeArray,
    ValueArray
  };

  Node* generate_array_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, AnyArray);
  }
  Node* generate_non_array_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, NonArray);
  }
  Node* generate_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, ObjectArray);
  }
  Node* generate_non_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, NonObjectArray);
  }
  Node* generate_typeArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, TypeArray);
  }
  Node* generate_valueArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, ValueArray);
  }
  Node* generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind);
  Node* generate_virtual_guard(Node* obj_klass, RegionNode* slow_region);
  CallJavaNode* generate_method_call(vmIntrinsics::ID method_id,
                                     bool is_virtual = false, bool is_static = false);
  CallJavaNode* generate_method_call_static(vmIntrinsics::ID method_id) {
    return generate_method_call(method_id, false, true);
  }
  CallJavaNode* generate_method_call_virtual(vmIntrinsics::ID method_id) {
    return generate_method_call(method_id, true, false);
  }
  Node* load_field_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString, bool is_exact, bool is_static, ciInstanceKlass* fromKls);
  Node* field_address_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString, bool is_exact, bool is_static, ciInstanceKlass* fromKls);

  Node* make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae);
  bool inline_string_compareTo(StrIntrinsicNode::ArgEnc ae);
  bool inline_string_indexOf(StrIntrinsicNode::ArgEnc ae);
  bool inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae);
  Node* make_indexOf_node(Node* src_start, Node* src_count, Node* tgt_start, Node* tgt_count,
                          RegionNode* region, Node* phi, StrIntrinsicNode::ArgEnc ae);
  bool inline_string_indexOfChar();
  bool inline_string_equals(StrIntrinsicNode::ArgEnc ae);
  bool inline_string_toBytesU();
  bool inline_string_getCharsU();
  bool inline_string_copy(bool compress);
  bool inline_string_char_access(bool is_store);
  Node* round_double_node(Node* n);
  bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
  bool inline_math_native(vmIntrinsics::ID id);
  bool inline_math(vmIntrinsics::ID id);
  template <typename OverflowOp>
  bool inline_math_overflow(Node* arg1, Node* arg2);
  void inline_math_mathExact(Node* math, Node* test);
  bool inline_math_addExactI(bool is_increment);
  bool inline_math_addExactL(bool is_increment);
  bool inline_math_multiplyExactI();
  bool inline_math_multiplyExactL();
  bool inline_math_multiplyHigh();
  bool inline_math_negateExactI();
  bool inline_math_negateExactL();
  bool inline_math_subtractExactI(bool is_decrement);
  bool inline_math_subtractExactL(bool is_decrement);
  bool inline_min_max(vmIntrinsics::ID id);
  bool inline_notify(vmIntrinsics::ID id);
  Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
  // This returns Type::AnyPtr, RawPtr, or OopPtr.
  int classify_unsafe_addr(Node* &base, Node* &offset, BasicType type);
  Node* make_unsafe_address(Node*& base, Node* offset, DecoratorSet decorators, BasicType type = T_ILLEGAL, bool can_cast = false);

  typedef enum { Relaxed, Opaque, Volatile, Acquire, Release } AccessKind;
  DecoratorSet mo_decorator_for_access_kind(AccessKind kind);
  bool inline_unsafe_access(bool is_store, BasicType type, AccessKind kind, bool is_unaligned);
  static bool klass_needs_init_guard(Node* kls);
  bool inline_unsafe_allocate();
  bool inline_unsafe_newArray(bool uninitialized);
  bool inline_unsafe_copyMemory();
  bool inline_unsafe_make_private_buffer();
  bool inline_unsafe_finish_private_buffer();
  bool inline_native_currentThread();

  bool inline_native_time_funcs(address method, const char* funcName);
#ifdef JFR_HAVE_INTRINSICS
  bool inline_native_classID();
  bool inline_native_getEventWriter();
#endif
  bool inline_native_isInterrupted();
  bool inline_native_Class_query(vmIntrinsics::ID id);
  bool inline_native_subtype_check();
  bool inline_native_getLength();
  bool inline_array_copyOf(bool is_copyOfRange);
  bool inline_array_equals(StrIntrinsicNode::ArgEnc ae);
  bool inline_preconditions_checkIndex();
  void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array);
  bool inline_native_clone(bool is_virtual);
  bool inline_native_Reflection_getCallerClass();
  // Helper function for inlining native object hash method
  bool inline_native_hashcode(bool is_virtual, bool is_static);
  bool inline_native_getClass();

  // Helper functions for inlining arraycopy
  bool inline_arraycopy();
  AllocateArrayNode* tightly_coupled_allocation(Node* ptr,
                                                RegionNode* slow_region);
  JVMState* arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp);
  void arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms, int saved_reexecute_sp,
                                      uint new_idx);

  typedef enum { LS_get_add, LS_get_set, LS_cmp_swap, LS_cmp_swap_weak, LS_cmp_exchange } LoadStoreKind;
  bool inline_unsafe_load_store(BasicType type, LoadStoreKind kind, AccessKind access_kind);
  bool inline_unsafe_fence(vmIntrinsics::ID id);
  bool inline_onspinwait();
  bool inline_fp_conversions(vmIntrinsics::ID id);
  bool inline_number_methods(vmIntrinsics::ID id);
  bool inline_reference_get();
  bool inline_Class_cast();
  bool inline_aescrypt_Block(vmIntrinsics::ID id);
  bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id);
  bool inline_counterMode_AESCrypt(vmIntrinsics::ID id);
  Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting);
  Node* inline_counterMode_AESCrypt_predicate();
  Node* get_key_start_from_aescrypt_object(Node* aescrypt_object);
  Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
  bool inline_ghash_processBlocks();
  bool inline_base64_encodeBlock();
  bool inline_sha_implCompress(vmIntrinsics::ID id);
  bool inline_digestBase_implCompressMB(int predicate);
  bool inline_sha_implCompressMB(Node* digestBaseObj, ciInstanceKlass* instklass_SHA,
                                 bool long_state, address stubAddr, const char *stubName,
                                 Node* src_start, Node* ofs, Node* limit);
  Node* get_state_from_sha_object(Node *sha_object);
  Node* get_state_from_sha5_object(Node *sha_object);
  Node* inline_digestBase_implCompressMB_predicate(int predicate);
  bool inline_encodeISOArray();
  bool inline_updateCRC32();
  bool inline_updateBytesCRC32();
  bool inline_updateByteBufferCRC32();
  Node* get_table_from_crc32c_class(ciInstanceKlass *crc32c_class);
  bool inline_updateBytesCRC32C();
  bool inline_updateDirectByteBufferCRC32C();
  bool inline_updateBytesAdler32();
  bool inline_updateByteBufferAdler32();
  bool inline_multiplyToLen();
  bool inline_hasNegatives();
  bool inline_squareToLen();
  bool inline_mulAdd();
  bool inline_montgomeryMultiply();
  bool inline_montgomerySquare();
  bool inline_vectorizedMismatch();
  bool inline_fma(vmIntrinsics::ID id);
  bool inline_character_compare(vmIntrinsics::ID id);
  bool inline_fp_min_max(vmIntrinsics::ID id);

  bool inline_profileBoolean();
  bool inline_isCompileConstant();
  void clear_upper_avx() {
#ifdef X86
    if (UseAVX >= 2) {
      C->set_clear_upper_avx(true);
    }
#endif
  }
};

//---------------------------make_vm_intrinsic----------------------------
CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
  vmIntrinsics::ID id = m->intrinsic_id();
  assert(id != vmIntrinsics::_none, "must be a VM intrinsic");

  if (!m->is_loaded()) {
    // Do not attempt to inline unloaded methods.
    return NULL;
  }

  C2Compiler* compiler = (C2Compiler*)CompileBroker::compiler(CompLevel_full_optimization);
  bool is_available = false;

  {
    // For calling is_intrinsic_supported and is_disabled_by_flags
    // the compiler must transition to '_thread_in_vm' state because both
    // methods access VM-internal data.
    VM_ENTRY_MARK;
    methodHandle mh(THREAD, m->get_Method());
    is_available = compiler != NULL && compiler->is_intrinsic_supported(mh, is_virtual) &&
                   !C->directive()->is_intrinsic_disabled(mh) &&
                   !vmIntrinsics::is_disabled_by_flags(mh);

  }

  if (is_available) {
    assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
    assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
    return new LibraryIntrinsic(m, is_virtual,
                                vmIntrinsics::predicates_needed(id),
                                vmIntrinsics::does_virtual_dispatch(id),
                                (vmIntrinsics::ID) id);
  } else {
    return NULL;
  }
}

//----------------------register_library_intrinsics-----------------------
// Initialize this file's data structures, for each Compile instance.
void Compile::register_library_intrinsics() {
  // Nothing to do here.
}

JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
  LibraryCallKit kit(jvms, this);
  Compile* C = kit.C;
  int nodes = C->unique();
#ifndef PRODUCT
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    char buf[1000];
    const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
    tty->print_cr("Intrinsic %s", str);
  }
#endif
  ciMethod* callee = kit.callee();
  const int bci = kit.bci();

  // Try to inline the intrinsic.
  if ((CheckIntrinsics ? callee->intrinsic_candidate() : true) &&
      kit.try_to_inline(_last_predicate)) {
    const char *inline_msg = is_virtual() ? "(intrinsic, virtual)"
                                          : "(intrinsic)";
    CompileTask::print_inlining_ul(callee, jvms->depth() - 1, bci, inline_msg);
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining(callee, jvms->depth() - 1, bci, inline_msg);
    }
    C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
    if (C->log()) {
      C->log()->elem("intrinsic id='%s'%s nodes='%d'",
                     vmIntrinsics::name_at(intrinsic_id()),
                     (is_virtual() ? " virtual='1'" : ""),
                     C->unique() - nodes);
    }
    // Push the result from the inlined method onto the stack.
    kit.push_result();
    C->print_inlining_update(this);
    return kit.transfer_exceptions_into_jvms();
  }

  // The intrinsic bailed out
  if (jvms->has_method()) {
    // Not a root compile.
    const char* msg;
    if (callee->intrinsic_candidate()) {
      msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
    } else {
      msg = is_virtual() ? "failed to inline (intrinsic, virtual), method not annotated"
                         : "failed to inline (intrinsic), method not annotated";
    }
    CompileTask::print_inlining_ul(callee, jvms->depth() - 1, bci, msg);
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining(callee, jvms->depth() - 1, bci, msg);
    }
  } else {
    // Root compile
    ResourceMark rm;
    stringStream msg_stream;
    msg_stream.print("Did not generate intrinsic %s%s at bci:%d in",
                     vmIntrinsics::name_at(intrinsic_id()),
                     is_virtual() ? " (virtual)" : "", bci);
    const char *msg = msg_stream.as_string();
    log_debug(jit, inlining)("%s", msg);
    if (C->print_intrinsics() || C->print_inlining()) {
      tty->print("%s", msg);
    }
  }
  C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
  C->print_inlining_update(this);
  return NULL;
}

Node* LibraryIntrinsic::generate_predicate(JVMState* jvms, int predicate) {
  LibraryCallKit kit(jvms, this);
  Compile* C = kit.C;
  int nodes = C->unique();
  _last_predicate = predicate;
#ifndef PRODUCT
  assert(is_predicated() && predicate < predicates_count(), "sanity");
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    char buf[1000];
    const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
    tty->print_cr("Predicate for intrinsic %s", str);
  }
#endif
  ciMethod* callee = kit.callee();
  const int bci = kit.bci();

  Node* slow_ctl = kit.try_to_predicate(predicate);
  if (!kit.failing()) {
    const char *inline_msg = is_virtual() ? "(intrinsic, virtual, predicate)"
                                          : "(intrinsic, predicate)";
    CompileTask::print_inlining_ul(callee, jvms->depth() - 1, bci, inline_msg);
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining(callee, jvms->depth() - 1, bci, inline_msg);
    }
    C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
    if (C->log()) {
      C->log()->elem("predicate_intrinsic id='%s'%s nodes='%d'",
                     vmIntrinsics::name_at(intrinsic_id()),
                     (is_virtual() ? " virtual='1'" : ""),
                     C->unique() - nodes);
    }
    return slow_ctl; // Could be NULL if the check folds.
  }

  // The intrinsic bailed out
  if (jvms->has_method()) {
    // Not a root compile.
    const char* msg = "failed to generate predicate for intrinsic";
    CompileTask::print_inlining_ul(kit.callee(), jvms->depth() - 1, bci, msg);
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining(kit.callee(), jvms->depth() - 1, bci, msg);
    }
  } else {
    // Root compile
    ResourceMark rm;
    stringStream msg_stream;
    msg_stream.print("Did not generate intrinsic %s%s at bci:%d in",
                     vmIntrinsics::name_at(intrinsic_id()),
                     is_virtual() ? " (virtual)" : "", bci);
    const char *msg = msg_stream.as_string();
    log_debug(jit, inlining)("%s", msg);
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining_stream()->print("%s", msg);
    }
  }
  C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
  return NULL;
}

bool LibraryCallKit::try_to_inline(int predicate) {
  // Handle symbolic names for otherwise undistinguished boolean switches:
  const bool is_store = true;
  const bool is_compress = true;
  const bool is_static = true;
  const bool is_volatile = true;

  if (!jvms()->has_method()) {
    // Root JVMState has a null method.
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");


  switch (intrinsic_id()) {
  case vmIntrinsics::_hashCode: return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
  case vmIntrinsics::_identityHashCode: return inline_native_hashcode(/*!virtual*/ false, is_static);
  case vmIntrinsics::_getClass: return inline_native_getClass();

  case vmIntrinsics::_dsin:
  case vmIntrinsics::_dcos:
  case vmIntrinsics::_dtan:
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_datan2:
  case vmIntrinsics::_dsqrt:
  case vmIntrinsics::_dexp:
  case vmIntrinsics::_dlog:
  case vmIntrinsics::_dlog10:
  case vmIntrinsics::_dpow: return inline_math_native(intrinsic_id());

  case vmIntrinsics::_min:
  case vmIntrinsics::_max: return inline_min_max(intrinsic_id());

  case vmIntrinsics::_notify:
  case vmIntrinsics::_notifyAll:
    return inline_notify(intrinsic_id());

  case vmIntrinsics::_addExactI: return inline_math_addExactI(false /* add */);
  case vmIntrinsics::_addExactL: return inline_math_addExactL(false /* add */);
  case vmIntrinsics::_decrementExactI: return inline_math_subtractExactI(true /* decrement */);
  case vmIntrinsics::_decrementExactL: return inline_math_subtractExactL(true /* decrement */);
  case vmIntrinsics::_incrementExactI: return inline_math_addExactI(true /* increment */);
  case vmIntrinsics::_incrementExactL: return inline_math_addExactL(true /* increment */);
  case vmIntrinsics::_multiplyExactI: return inline_math_multiplyExactI();
  case vmIntrinsics::_multiplyExactL: return inline_math_multiplyExactL();
  case vmIntrinsics::_multiplyHigh: return inline_math_multiplyHigh();
  case vmIntrinsics::_negateExactI: return inline_math_negateExactI();
  case vmIntrinsics::_negateExactL: return inline_math_negateExactL();
  case vmIntrinsics::_subtractExactI: return inline_math_subtractExactI(false /* subtract */);
  case vmIntrinsics::_subtractExactL: return inline_math_subtractExactL(false /* subtract */);

  case vmIntrinsics::_arraycopy: return inline_arraycopy();

  case vmIntrinsics::_compareToL: return inline_string_compareTo(StrIntrinsicNode::LL);
  case vmIntrinsics::_compareToU: return inline_string_compareTo(StrIntrinsicNode::UU);
  case vmIntrinsics::_compareToLU: return inline_string_compareTo(StrIntrinsicNode::LU);
  case vmIntrinsics::_compareToUL: return inline_string_compareTo(StrIntrinsicNode::UL);

  case vmIntrinsics::_indexOfL: return inline_string_indexOf(StrIntrinsicNode::LL);
  case vmIntrinsics::_indexOfU: return inline_string_indexOf(StrIntrinsicNode::UU);
  case vmIntrinsics::_indexOfUL: return inline_string_indexOf(StrIntrinsicNode::UL);
  case vmIntrinsics::_indexOfIL: return inline_string_indexOfI(StrIntrinsicNode::LL);
  case vmIntrinsics::_indexOfIU: return inline_string_indexOfI(StrIntrinsicNode::UU);
  case vmIntrinsics::_indexOfIUL: return inline_string_indexOfI(StrIntrinsicNode::UL);
  case vmIntrinsics::_indexOfU_char: return inline_string_indexOfChar();

  case vmIntrinsics::_equalsL: return inline_string_equals(StrIntrinsicNode::LL);
  case vmIntrinsics::_equalsU: return inline_string_equals(StrIntrinsicNode::UU);

  case vmIntrinsics::_toBytesStringU: return inline_string_toBytesU();
  case vmIntrinsics::_getCharsStringU: return inline_string_getCharsU();
  case vmIntrinsics::_getCharStringU: return inline_string_char_access(!is_store);
  case vmIntrinsics::_putCharStringU: return inline_string_char_access(is_store);

  case vmIntrinsics::_compressStringC:
  case vmIntrinsics::_compressStringB: return inline_string_copy(is_compress);
  case vmIntrinsics::_inflateStringC:
  case vmIntrinsics::_inflateStringB: return inline_string_copy(!is_compress);

  case vmIntrinsics::_makePrivateBuffer: return inline_unsafe_make_private_buffer();
  case vmIntrinsics::_finishPrivateBuffer: return inline_unsafe_finish_private_buffer();
  case vmIntrinsics::_getReference: return inline_unsafe_access(!is_store, T_OBJECT, Relaxed, false);
  case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_store, T_BOOLEAN, Relaxed, false);
  case vmIntrinsics::_getByte: return inline_unsafe_access(!is_store, T_BYTE, Relaxed, false);
  case vmIntrinsics::_getShort: return inline_unsafe_access(!is_store, T_SHORT, Relaxed, false);
  case vmIntrinsics::_getChar: return inline_unsafe_access(!is_store, T_CHAR, Relaxed, false);
  case vmIntrinsics::_getInt: return inline_unsafe_access(!is_store, T_INT, Relaxed, false);
  case vmIntrinsics::_getLong: return inline_unsafe_access(!is_store, T_LONG, Relaxed, false);
  case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_store, T_FLOAT, Relaxed, false);
  case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_store, T_DOUBLE, Relaxed, false);
  case vmIntrinsics::_getValue: return inline_unsafe_access(!is_store, T_VALUETYPE, Relaxed, false);

  case vmIntrinsics::_putReference: return inline_unsafe_access(is_store, T_OBJECT, Relaxed, false);
  case vmIntrinsics::_putBoolean: return inline_unsafe_access(is_store, T_BOOLEAN, Relaxed, false);
  case vmIntrinsics::_putByte: return inline_unsafe_access(is_store, T_BYTE, Relaxed, false);
  case vmIntrinsics::_putShort: return inline_unsafe_access(is_store, T_SHORT, Relaxed, false);
  case vmIntrinsics::_putChar: return inline_unsafe_access(is_store, T_CHAR, Relaxed, false);
  case vmIntrinsics::_putInt: return inline_unsafe_access(is_store, T_INT, Relaxed, false);
  case vmIntrinsics::_putLong: return inline_unsafe_access(is_store, T_LONG, Relaxed, false);
  case vmIntrinsics::_putFloat: return inline_unsafe_access(is_store, T_FLOAT, Relaxed, false);
  case vmIntrinsics::_putDouble: return inline_unsafe_access(is_store, T_DOUBLE, Relaxed, false);
  case vmIntrinsics::_putValue: return inline_unsafe_access(is_store, T_VALUETYPE, Relaxed, false);

  case vmIntrinsics::_getReferenceVolatile: return inline_unsafe_access(!is_store, T_OBJECT, Volatile, false);
  case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_store, T_BOOLEAN, Volatile, false);
  case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_store, T_BYTE, Volatile, false);
  case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_store, T_SHORT, Volatile, false);
  case vmIntrinsics::_getCharVolatile: return inline_unsafe_access(!is_store, T_CHAR, Volatile, false);
  case vmIntrinsics::_getIntVolatile: return inline_unsafe_access(!is_store, T_INT, Volatile, false);
  case vmIntrinsics::_getLongVolatile: return inline_unsafe_access(!is_store, T_LONG, Volatile, false);
  case vmIntrinsics::_getFloatVolatile: return inline_unsafe_access(!is_store, T_FLOAT, Volatile, false);
  case vmIntrinsics::_getDoubleVolatile: return inline_unsafe_access(!is_store, T_DOUBLE, Volatile, false);

  case vmIntrinsics::_putReferenceVolatile: return inline_unsafe_access(is_store, T_OBJECT, Volatile, false);
  case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access(is_store, T_BOOLEAN, Volatile, false);
  case vmIntrinsics::_putByteVolatile: return inline_unsafe_access(is_store, T_BYTE, Volatile, false);
  case vmIntrinsics::_putShortVolatile: return inline_unsafe_access(is_store, T_SHORT, Volatile, false);
  case vmIntrinsics::_putCharVolatile: return inline_unsafe_access(is_store, T_CHAR, Volatile, false);
  case vmIntrinsics::_putIntVolatile: return inline_unsafe_access(is_store, T_INT, Volatile, false);
  case vmIntrinsics::_putLongVolatile: return inline_unsafe_access(is_store, T_LONG, Volatile, false);
  case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access(is_store, T_FLOAT, Volatile, false);
  case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access(is_store, T_DOUBLE, Volatile, false);

  case vmIntrinsics::_getShortUnaligned: return inline_unsafe_access(!is_store, T_SHORT, Relaxed, true);
  case vmIntrinsics::_getCharUnaligned: return inline_unsafe_access(!is_store, T_CHAR, Relaxed, true);
  case vmIntrinsics::_getIntUnaligned: return inline_unsafe_access(!is_store, T_INT, Relaxed, true);
  case vmIntrinsics::_getLongUnaligned: return inline_unsafe_access(!is_store, T_LONG, Relaxed, true);

  case vmIntrinsics::_putShortUnaligned: return inline_unsafe_access(is_store, T_SHORT, Relaxed, true);
  case vmIntrinsics::_putCharUnaligned: return inline_unsafe_access(is_store, T_CHAR, Relaxed, true);
  case vmIntrinsics::_putIntUnaligned: return inline_unsafe_access(is_store, T_INT, Relaxed, true);
  case vmIntrinsics::_putLongUnaligned: return inline_unsafe_access(is_store, T_LONG, Relaxed, true);

  case vmIntrinsics::_getReferenceAcquire: return inline_unsafe_access(!is_store, T_OBJECT, Acquire, false);
  case vmIntrinsics::_getBooleanAcquire: return inline_unsafe_access(!is_store, T_BOOLEAN, Acquire, false);
  case vmIntrinsics::_getByteAcquire: return inline_unsafe_access(!is_store, T_BYTE, Acquire, false);
  case vmIntrinsics::_getShortAcquire: return inline_unsafe_access(!is_store, T_SHORT, Acquire, false);
  case vmIntrinsics::_getCharAcquire: return inline_unsafe_access(!is_store, T_CHAR, Acquire, false);
  case vmIntrinsics::_getIntAcquire: return inline_unsafe_access(!is_store, T_INT, Acquire, false);
  case vmIntrinsics::_getLongAcquire: return inline_unsafe_access(!is_store, T_LONG, Acquire, false);
  case vmIntrinsics::_getFloatAcquire: return inline_unsafe_access(!is_store, T_FLOAT, Acquire, false);
  case vmIntrinsics::_getDoubleAcquire: return inline_unsafe_access(!is_store, T_DOUBLE, Acquire, false);

  case vmIntrinsics::_putReferenceRelease: return inline_unsafe_access(is_store, T_OBJECT, Release, false);
  case vmIntrinsics::_putBooleanRelease: return inline_unsafe_access(is_store, T_BOOLEAN, Release, false);
  case vmIntrinsics::_putByteRelease: return inline_unsafe_access(is_store, T_BYTE, Release, false);
  case vmIntrinsics::_putShortRelease: return inline_unsafe_access(is_store, T_SHORT, Release, false);
  case vmIntrinsics::_putCharRelease: return inline_unsafe_access(is_store, T_CHAR, Release, false);
  case vmIntrinsics::_putIntRelease: return inline_unsafe_access(is_store, T_INT, Release, false);
  case vmIntrinsics::_putLongRelease: return inline_unsafe_access(is_store, T_LONG, Release, false);
  case vmIntrinsics::_putFloatRelease: return inline_unsafe_access(is_store, T_FLOAT, Release, false);
  case vmIntrinsics::_putDoubleRelease: return inline_unsafe_access(is_store, T_DOUBLE, Release, false);

  case vmIntrinsics::_getReferenceOpaque: return inline_unsafe_access(!is_store, T_OBJECT, Opaque, false);
  case vmIntrinsics::_getBooleanOpaque: return inline_unsafe_access(!is_store, T_BOOLEAN, Opaque, false);
  case vmIntrinsics::_getByteOpaque: return inline_unsafe_access(!is_store, T_BYTE, Opaque, false);
  case vmIntrinsics::_getShortOpaque: return inline_unsafe_access(!is_store, T_SHORT, Opaque, false);
  case vmIntrinsics::_getCharOpaque: return inline_unsafe_access(!is_store, T_CHAR, Opaque, false);
  case vmIntrinsics::_getIntOpaque: return inline_unsafe_access(!is_store, T_INT, Opaque, false);
  case vmIntrinsics::_getLongOpaque: return inline_unsafe_access(!is_store, T_LONG, Opaque, false);
  case vmIntrinsics::_getFloatOpaque: return inline_unsafe_access(!is_store, T_FLOAT, Opaque, false);
  case vmIntrinsics::_getDoubleOpaque: return inline_unsafe_access(!is_store, T_DOUBLE, Opaque, false);

  case vmIntrinsics::_putReferenceOpaque: return inline_unsafe_access(is_store, T_OBJECT, Opaque, false);
  case vmIntrinsics::_putBooleanOpaque: return inline_unsafe_access(is_store, T_BOOLEAN, Opaque, false);
  case vmIntrinsics::_putByteOpaque: return inline_unsafe_access(is_store, T_BYTE, Opaque, false);
  case vmIntrinsics::_putShortOpaque: return inline_unsafe_access(is_store, T_SHORT, Opaque, false);
  case vmIntrinsics::_putCharOpaque: return inline_unsafe_access(is_store, T_CHAR, Opaque, false);
  case vmIntrinsics::_putIntOpaque: return inline_unsafe_access(is_store, T_INT, Opaque, false);
  case vmIntrinsics::_putLongOpaque: return inline_unsafe_access(is_store, T_LONG, Opaque, false);
  case vmIntrinsics::_putFloatOpaque: return inline_unsafe_access(is_store, T_FLOAT, Opaque, false);
  case vmIntrinsics::_putDoubleOpaque: return inline_unsafe_access(is_store, T_DOUBLE, Opaque, false);

  case vmIntrinsics::_compareAndSetReference: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap, Volatile);
  case vmIntrinsics::_compareAndSetByte: return inline_unsafe_load_store(T_BYTE, LS_cmp_swap, Volatile);
  case vmIntrinsics::_compareAndSetShort: return inline_unsafe_load_store(T_SHORT, LS_cmp_swap, Volatile);
  case vmIntrinsics::_compareAndSetInt: return inline_unsafe_load_store(T_INT, LS_cmp_swap, Volatile);
  case vmIntrinsics::_compareAndSetLong: return inline_unsafe_load_store(T_LONG, LS_cmp_swap, Volatile);

  case vmIntrinsics::_weakCompareAndSetReferencePlain: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetReferenceAcquire: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetReferenceRelease: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetReference: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Volatile);
  case vmIntrinsics::_weakCompareAndSetBytePlain: return inline_unsafe_load_store(T_BYTE, LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetByteAcquire: return inline_unsafe_load_store(T_BYTE, LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetByteRelease: return inline_unsafe_load_store(T_BYTE, LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetByte: return inline_unsafe_load_store(T_BYTE, LS_cmp_swap_weak, Volatile);
  case vmIntrinsics::_weakCompareAndSetShortPlain: return inline_unsafe_load_store(T_SHORT, LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetShortAcquire: return inline_unsafe_load_store(T_SHORT, LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetShortRelease: return inline_unsafe_load_store(T_SHORT, LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetShort: return inline_unsafe_load_store(T_SHORT, LS_cmp_swap_weak, Volatile);
  case vmIntrinsics::_weakCompareAndSetIntPlain: return inline_unsafe_load_store(T_INT, LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetIntAcquire: return inline_unsafe_load_store(T_INT, LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetIntRelease: return inline_unsafe_load_store(T_INT, LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetInt: return inline_unsafe_load_store(T_INT, LS_cmp_swap_weak, Volatile);
  case vmIntrinsics::_weakCompareAndSetLongPlain: return inline_unsafe_load_store(T_LONG, LS_cmp_swap_weak, Relaxed);
  case vmIntrinsics::_weakCompareAndSetLongAcquire: return inline_unsafe_load_store(T_LONG, LS_cmp_swap_weak, Acquire);
  case vmIntrinsics::_weakCompareAndSetLongRelease: return inline_unsafe_load_store(T_LONG, LS_cmp_swap_weak, Release);
  case vmIntrinsics::_weakCompareAndSetLong: return inline_unsafe_load_store(T_LONG, LS_cmp_swap_weak, Volatile);

  case vmIntrinsics::_compareAndExchangeReference: return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange, Volatile);
  case vmIntrinsics::_compareAndExchangeReferenceAcquire: return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange, Acquire);
  case vmIntrinsics::_compareAndExchangeReferenceRelease: return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange, Release);
  case vmIntrinsics::_compareAndExchangeByte: return inline_unsafe_load_store(T_BYTE, LS_cmp_exchange, Volatile);
  case vmIntrinsics::_compareAndExchangeByteAcquire: return inline_unsafe_load_store(T_BYTE, LS_cmp_exchange, Acquire);
  case vmIntrinsics::_compareAndExchangeByteRelease: return inline_unsafe_load_store(T_BYTE, LS_cmp_exchange, Release);
  case vmIntrinsics::_compareAndExchangeShort: return inline_unsafe_load_store(T_SHORT, LS_cmp_exchange, Volatile);
  case vmIntrinsics::_compareAndExchangeShortAcquire: return inline_unsafe_load_store(T_SHORT, LS_cmp_exchange, Acquire);
  case vmIntrinsics::_compareAndExchangeShortRelease: return inline_unsafe_load_store(T_SHORT, LS_cmp_exchange, Release);
  case vmIntrinsics::_compareAndExchangeInt: return inline_unsafe_load_store(T_INT, LS_cmp_exchange, Volatile);
  case vmIntrinsics::_compareAndExchangeIntAcquire: return inline_unsafe_load_store(T_INT, LS_cmp_exchange, Acquire);
  case vmIntrinsics::_compareAndExchangeIntRelease: return inline_unsafe_load_store(T_INT, LS_cmp_exchange, Release);
  case vmIntrinsics::_compareAndExchangeLong: return inline_unsafe_load_store(T_LONG, LS_cmp_exchange, Volatile);
  case vmIntrinsics::_compareAndExchangeLongAcquire: return inline_unsafe_load_store(T_LONG, LS_cmp_exchange, Acquire);
  case vmIntrinsics::_compareAndExchangeLongRelease: return inline_unsafe_load_store(T_LONG, LS_cmp_exchange, Release);

  case vmIntrinsics::_getAndAddByte: return inline_unsafe_load_store(T_BYTE, LS_get_add, Volatile);
  case vmIntrinsics::_getAndAddShort: return inline_unsafe_load_store(T_SHORT, LS_get_add, Volatile);
  case vmIntrinsics::_getAndAddInt: return inline_unsafe_load_store(T_INT, LS_get_add, Volatile);
  case vmIntrinsics::_getAndAddLong: return inline_unsafe_load_store(T_LONG, LS_get_add, Volatile);

  case vmIntrinsics::_getAndSetByte: return inline_unsafe_load_store(T_BYTE, LS_get_set, Volatile);
  case vmIntrinsics::_getAndSetShort: return inline_unsafe_load_store(T_SHORT, LS_get_set, Volatile);
  case vmIntrinsics::_getAndSetInt: return inline_unsafe_load_store(T_INT, LS_get_set, Volatile);
  case vmIntrinsics::_getAndSetLong: return inline_unsafe_load_store(T_LONG, LS_get_set, Volatile);
  case vmIntrinsics::_getAndSetReference: return inline_unsafe_load_store(T_OBJECT, LS_get_set, Volatile);

  case vmIntrinsics::_loadFence:
  case vmIntrinsics::_storeFence:
  case vmIntrinsics::_fullFence: return inline_unsafe_fence(intrinsic_id());

  case vmIntrinsics::_onSpinWait: return inline_onspinwait();

  case vmIntrinsics::_currentThread: return inline_native_currentThread();
  case vmIntrinsics::_isInterrupted: return inline_native_isInterrupted();

#ifdef JFR_HAVE_INTRINSICS
  case vmIntrinsics::_counterTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, JFR_TIME_FUNCTION), "counterTime");
  case vmIntrinsics::_getClassId: return inline_native_classID();
  case vmIntrinsics::_getEventWriter: return inline_native_getEventWriter();
#endif
  case vmIntrinsics::_currentTimeMillis: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
  case vmIntrinsics::_nanoTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
  case vmIntrinsics::_allocateInstance: return inline_unsafe_allocate();
  case vmIntrinsics::_copyMemory: return inline_unsafe_copyMemory();
  case vmIntrinsics::_getLength: return inline_native_getLength();
  case vmIntrinsics::_copyOf: return inline_array_copyOf(false);
  case vmIntrinsics::_copyOfRange: return inline_array_copyOf(true);
  case vmIntrinsics::_equalsB: return inline_array_equals(StrIntrinsicNode::LL);
  case vmIntrinsics::_equalsC: return inline_array_equals(StrIntrinsicNode::UU);
  case vmIntrinsics::_Preconditions_checkIndex: return inline_preconditions_checkIndex();
  case vmIntrinsics::_clone: return inline_native_clone(intrinsic()->is_virtual());

  case vmIntrinsics::_allocateUninitializedArray: return inline_unsafe_newArray(true);
  case vmIntrinsics::_newArray: return inline_unsafe_newArray(false);

  case vmIntrinsics::_isAssignableFrom: return inline_native_subtype_check();

  case vmIntrinsics::_isInstance:
  case vmIntrinsics::_getModifiers:
  case vmIntrinsics::_isInterface:
  case vmIntrinsics::_isArray:
  case vmIntrinsics::_isPrimitive:
  case vmIntrinsics::_getSuperclass:
  case vmIntrinsics::_getClassAccessFlags: return inline_native_Class_query(intrinsic_id());

  case vmIntrinsics::_floatToRawIntBits:
  case vmIntrinsics::_floatToIntBits:
  case vmIntrinsics::_intBitsToFloat:
  case vmIntrinsics::_doubleToRawLongBits:
  case vmIntrinsics::_doubleToLongBits:
  case vmIntrinsics::_longBitsToDouble: return inline_fp_conversions(intrinsic_id());

  case vmIntrinsics::_numberOfLeadingZeros_i:
  case vmIntrinsics::_numberOfLeadingZeros_l:
  case vmIntrinsics::_numberOfTrailingZeros_i:
  case vmIntrinsics::_numberOfTrailingZeros_l:
  case vmIntrinsics::_bitCount_i:
  case vmIntrinsics::_bitCount_l:
  case vmIntrinsics::_reverseBytes_i:
  case vmIntrinsics::_reverseBytes_l:
  case vmIntrinsics::_reverseBytes_s:
  case vmIntrinsics::_reverseBytes_c: return inline_number_methods(intrinsic_id());

  case vmIntrinsics::_getCallerClass: return inline_native_Reflection_getCallerClass();

  case vmIntrinsics::_Reference_get: return inline_reference_get();

  case vmIntrinsics::_Class_cast: return inline_Class_cast();

  case vmIntrinsics::_aescrypt_encryptBlock:
  case vmIntrinsics::_aescrypt_decryptBlock: return inline_aescrypt_Block(intrinsic_id());

  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt(intrinsic_id());

  case vmIntrinsics::_counterMode_AESCrypt:
    return inline_counterMode_AESCrypt(intrinsic_id());

  case vmIntrinsics::_sha_implCompress:
  case vmIntrinsics::_sha2_implCompress:
  case vmIntrinsics::_sha5_implCompress:
    return inline_sha_implCompress(intrinsic_id());

  case vmIntrinsics::_digestBase_implCompressMB:
    return inline_digestBase_implCompressMB(predicate);

  case vmIntrinsics::_multiplyToLen:
    return inline_multiplyToLen();

  case vmIntrinsics::_squareToLen:
    return inline_squareToLen();

  case vmIntrinsics::_mulAdd:
    return inline_mulAdd();

  case vmIntrinsics::_montgomeryMultiply:
    return inline_montgomeryMultiply();
  case vmIntrinsics::_montgomerySquare:
    return inline_montgomerySquare();

  case vmIntrinsics::_vectorizedMismatch:
    return inline_vectorizedMismatch();

  case vmIntrinsics::_ghash_processBlocks:
    return inline_ghash_processBlocks();
  case vmIntrinsics::_base64_encodeBlock:
    return inline_base64_encodeBlock();

  case vmIntrinsics::_encodeISOArray:
  case vmIntrinsics::_encodeByteISOArray:
    return inline_encodeISOArray();

  case vmIntrinsics::_updateCRC32:
    return inline_updateCRC32();
  case vmIntrinsics::_updateBytesCRC32:
    return inline_updateBytesCRC32();
  case vmIntrinsics::_updateByteBufferCRC32:
    return inline_updateByteBufferCRC32();

  case vmIntrinsics::_updateBytesCRC32C:
    return inline_updateBytesCRC32C();
  case vmIntrinsics::_updateDirectByteBufferCRC32C:
    return inline_updateDirectByteBufferCRC32C();

  case vmIntrinsics::_updateBytesAdler32:
    return inline_updateBytesAdler32();
  case vmIntrinsics::_updateByteBufferAdler32:
    return inline_updateByteBufferAdler32();

  case vmIntrinsics::_profileBoolean:
    return inline_profileBoolean();
  case vmIntrinsics::_isCompileConstant:
    return inline_isCompileConstant();

  case vmIntrinsics::_hasNegatives:
    return inline_hasNegatives();

  case vmIntrinsics::_fmaD:
  case vmIntrinsics::_fmaF:
    return inline_fma(intrinsic_id());

  case vmIntrinsics::_isDigit:
  case vmIntrinsics::_isLowerCase:
  case vmIntrinsics::_isUpperCase:
  case vmIntrinsics::_isWhitespace:
    return inline_character_compare(intrinsic_id());

  case vmIntrinsics::_maxF:
  case vmIntrinsics::_minF:
  case vmIntrinsics::_maxD:
  case vmIntrinsics::_minD:
    return inline_fp_min_max(intrinsic_id());

  default:
    // If you get here, it may be that someone has added a new intrinsic
    // to the list in vmSymbols.hpp without implementing it here.
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
    }
#endif
    return false;
  }
}

Node* LibraryCallKit::try_to_predicate(int predicate) {
  if (!jvms()->has_method()) {
    // Root JVMState has a null method.
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");

  switch (intrinsic_id()) {
  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt_predicate(false);
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt_predicate(true);
  case vmIntrinsics::_counterMode_AESCrypt:
    return inline_counterMode_AESCrypt_predicate();
  case vmIntrinsics::_digestBase_implCompressMB:
    return inline_digestBase_implCompressMB_predicate(predicate);

  default:
    // If you get here, it may be that someone has added a new intrinsic
    // to the list in vmSymbols.hpp without implementing it here.
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented predicate for intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
    }
#endif
    Node* slow_ctl = control();
    set_control(top()); // No fast path intrinsic
    return slow_ctl;
  }
}

//------------------------------set_result-------------------------------
// Helper function for finishing intrinsics.
void LibraryCallKit::set_result(RegionNode* region, PhiNode* value) {
  record_for_igvn(region);
  set_control(_gvn.transform(region));
  set_result(_gvn.transform(value));
  assert(value->type()->basic_type() == result()->bottom_type()->basic_type(), "sanity");
}

//------------------------------generate_guard---------------------------
// Helper function for generating guarded fast-slow graph structures.
// The given 'test', if true, guards a slow path. If the test fails
// then a fast path can be taken. (We generally hope it fails.)
// In all cases, GraphKit::control() is updated to the fast path.
// The returned value represents the control for the slow path.
// The return value is never 'top'; it is either a valid control
// or NULL if it is obvious that the slow path can never be taken.
// Also, if region and the slow control are not NULL, the slow edge
// is appended to the region.
Node* LibraryCallKit::generate_guard(Node* test, RegionNode* region, float true_prob) {
  if (stopped()) {
    // Already short circuited.
    return NULL;
  }

  // Build an if node and its projections.
  // If test is true we take the slow path, which we assume is uncommon.
  if (_gvn.type(test) == TypeInt::ZERO) {
    // The slow branch is never taken. No need to build this guard.
    return NULL;
  }

  IfNode* iff = create_and_map_if(control(), test, true_prob, COUNT_UNKNOWN);

  Node* if_slow = _gvn.transform(new IfTrueNode(iff));
  if (if_slow == top()) {
    // The slow branch is never taken. No need to build this guard.
    return NULL;
  }

  if (region != NULL)
    region->add_req(if_slow);

  Node* if_fast = _gvn.transform(new IfFalseNode(iff));
  set_control(if_fast);

  return if_slow;
}

inline Node* LibraryCallKit::generate_slow_guard(Node* test, RegionNode* region) {
  return generate_guard(test, region, PROB_UNLIKELY_MAG(3));
}
inline Node* LibraryCallKit::generate_fair_guard(Node* test, RegionNode* region) {
  return generate_guard(test, region, PROB_FAIR);
}
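
// A minimal usage sketch of these guard helpers (it mirrors the pattern in
// generate_string_range_check() below): slow-path edges are collected into a
// bailout region, and if any guard can actually fail we deoptimize on that path.
//
//   RegionNode* bailout = new RegionNode(1);
//   record_for_igvn(bailout);
//   generate_negative_guard(offset, bailout);                 // appends slow edge
//   generate_limit_guard(offset, count, array_length, bailout);
//   if (bailout->req() > 1) {                                 // some guard may fail
//     PreserveJVMState pjvms(this);
//     set_control(_gvn.transform(bailout));
//     uncommon_trap(Deoptimization::Reason_intrinsic,
//                   Deoptimization::Action_maybe_recompile);
//   }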

inline Node* LibraryCallKit::generate_negative_guard(Node* index, RegionNode* region,
                                                     Node* *pos_index) {
  if (stopped())
    return NULL;                // already stopped
  if (_gvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
    return NULL;                // index is already adequately typed
  Node* cmp_lt = _gvn.transform(new CmpINode(index, intcon(0)));
  Node* bol_lt = _gvn.transform(new BoolNode(cmp_lt, BoolTest::lt));
  Node* is_neg = generate_guard(bol_lt, region, PROB_MIN);
  if (is_neg != NULL && pos_index != NULL) {
    // Emulate effect of Parse::adjust_map_after_if.
    Node* ccast = new CastIINode(index, TypeInt::POS);
    ccast->set_req(0, control());
    (*pos_index) = _gvn.transform(ccast);
  }
  return is_neg;
}

// Make sure that 'position' is a valid limit index, in [0..length].
// There are two equivalent plans for checking this:
//   A. (offset + copyLength)  unsigned<=  arrayLength
//   B. offset  <=  (arrayLength - copyLength)
// We require that all of the values above, except for the sum and
// difference, are already known to be non-negative.
// Plan A is robust in the face of overflow, if offset and copyLength
// are both hugely positive.
//
// Plan B is less direct and intuitive, but it does not overflow at
// all, since the difference of two non-negatives is always
// representable. Whenever Java methods must perform the equivalent
// check they generally use Plan B instead of Plan A.
// For the moment we use Plan A.
inline Node* LibraryCallKit::generate_limit_guard(Node* offset,
                                                  Node* subseq_length,
                                                  Node* array_length,
                                                  RegionNode* region) {
  if (stopped())
    return NULL;                // already stopped
  bool zero_offset = _gvn.type(offset) == TypeInt::ZERO;
  if (zero_offset && subseq_length->eqv_uncast(array_length))
    return NULL;                // common case of whole-array copy
  Node* last = subseq_length;
  if (!zero_offset)             // last += offset
    last = _gvn.transform(new AddINode(last, offset));
  Node* cmp_lt = _gvn.transform(new CmpUNode(array_length, last));
  Node* bol_lt = _gvn.transform(new BoolNode(cmp_lt, BoolTest::lt));
  Node* is_over = generate_guard(bol_lt, region, PROB_MIN);
  return is_over;
}
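
// Worked example (illustrative numbers only) of why Plan A tolerates overflow:
// with a 10-element array, offset = 2,000,000,000 and copyLength = 2,000,000,000,
// the AddINode above wraps to -294,967,296, but the CmpUNode compares the
// unsigned view (about 4.0e9), so
//
//   array_length u< (offset + copyLength)   // 10 u< 4,000,000,000  ->  true
//
// and the guard routes control to the slow path instead of accepting an
// out-of-range copy.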

// Emit range checks for the given String.value byte array
void LibraryCallKit::generate_string_range_check(Node* array, Node* offset, Node* count, bool char_count) {
  if (stopped()) {
    return; // already stopped
  }
  RegionNode* bailout = new RegionNode(1);
  record_for_igvn(bailout);
  if (char_count) {
    // Convert char count to byte count
    count = _gvn.transform(new LShiftINode(count, intcon(1)));
  }

  // Offset and count must not be negative
  generate_negative_guard(offset, bailout);
  generate_negative_guard(count, bailout);
  // Offset + count must not exceed length of array
  generate_limit_guard(offset, count, load_array_length(array), bailout);

  if (bailout->req() > 1) {
    PreserveJVMState pjvms(this);
    set_control(_gvn.transform(bailout));
    uncommon_trap(Deoptimization::Reason_intrinsic,
                  Deoptimization::Action_maybe_recompile);
  }
}

//--------------------------generate_current_thread--------------------
Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
  ciKlass* thread_klass = env()->Thread_klass();
  const Type* thread_type = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
  Node* thread = _gvn.transform(new ThreadLocalNode());
  Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
  Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT, MemNode::unordered);
  tls_output = thread;
  return threadObj;
}


//------------------------------make_string_method_node------------------------
// Helper method for String intrinsic functions. This version is called with
// str1 and str2 pointing to byte[] nodes containing Latin1 or UTF16 encoded
// characters (depending on the argument encoding 'ae'). cnt1 and cnt2 are pointing to Int nodes
// containing the lengths of str1 and str2.
Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae) {
  Node* result = NULL;
  switch (opcode) {
  case Op_StrIndexOf:
    result = new StrIndexOfNode(control(), memory(TypeAryPtr::BYTES),
                                str1_start, cnt1, str2_start, cnt2, ae);
    break;
  case Op_StrComp:
    result = new StrCompNode(control(), memory(TypeAryPtr::BYTES),
                             str1_start, cnt1, str2_start, cnt2, ae);
    break;
  case Op_StrEquals:
    // We already know that cnt1 == cnt2 here (checked in 'inline_string_equals').
    // Use the constant length if there is one because optimized match rule may exist.
    result = new StrEqualsNode(control(), memory(TypeAryPtr::BYTES),
                               str1_start, str2_start, cnt2->is_Con() ? cnt2 : cnt1, ae);
    break;
  default:
    ShouldNotReachHere();
    return NULL;
  }

  // All these intrinsics have checks.
  C->set_has_split_ifs(true); // Has chance for split-if optimization
  clear_upper_avx();

  return _gvn.transform(result);
}

//------------------------------inline_string_compareTo------------------------
bool LibraryCallKit::inline_string_compareTo(StrIntrinsicNode::ArgEnc ae) {
  Node* arg1 = argument(0);
  Node* arg2 = argument(1);

  arg1 = must_be_not_null(arg1, true);
  arg2 = must_be_not_null(arg2, true);

  arg1 = access_resolve(arg1, ACCESS_READ);
  arg2 = access_resolve(arg2, ACCESS_READ);

  // Get start addr and length of first argument
  Node* arg1_start = array_element_address(arg1, intcon(0), T_BYTE);
  Node* arg1_cnt = load_array_length(arg1);

  // Get start addr and length of second argument
  Node* arg2_start = array_element_address(arg2, intcon(0), T_BYTE);
  Node* arg2_cnt = load_array_length(arg2);

  Node* result = make_string_method_node(Op_StrComp, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
  set_result(result);
  return true;
}

1155 //------------------------------inline_string_equals------------------------
1156 bool LibraryCallKit::inline_string_equals(StrIntrinsicNode::ArgEnc ae) {
1157 Node* arg1 = argument(0);
1158 Node* arg2 = argument(1);
1159
1160 // paths (plus control) merge
1161 RegionNode* region = new RegionNode(3);
1162 Node* phi = new PhiNode(region, TypeInt::BOOL);
1163
1164 if (!stopped()) {
1165
1166 arg1 = must_be_not_null(arg1, true);
1167 arg2 = must_be_not_null(arg2, true);
1168
1169 arg1 = access_resolve(arg1, ACCESS_READ);
1170 arg2 = access_resolve(arg2, ACCESS_READ);
1171
1172 // Get start addr and length of first argument
1173 Node* arg1_start = array_element_address(arg1, intcon(0), T_BYTE);
1174 Node* arg1_cnt = load_array_length(arg1);
1175
1176 // Get start addr and length of second argument
1177 Node* arg2_start = array_element_address(arg2, intcon(0), T_BYTE);
1178 Node* arg2_cnt = load_array_length(arg2);
1179
1180 // Check for arg1_cnt != arg2_cnt
1181 Node* cmp = _gvn.transform(new CmpINode(arg1_cnt, arg2_cnt));
1182 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
1183 Node* if_ne = generate_slow_guard(bol, NULL);
1184 if (if_ne != NULL) {
1185 phi->init_req(2, intcon(0));
1186 region->init_req(2, if_ne);
1187 }
1188
1189 // The check for count == 0 is done by the assembler code for StrEquals.
1190
1191 if (!stopped()) {
1192 Node* equals = make_string_method_node(Op_StrEquals, arg1_start, arg1_cnt, arg2_start, arg2_cnt, ae);
1193 phi->init_req(1, equals);
1194 region->init_req(1, control());
1195 }
1196 }
1197
1198 // post merge
1199 set_control(_gvn.transform(region));
1200 record_for_igvn(region);
1201
1202 set_result(_gvn.transform(phi));
1203 return true;
1204 }
1205
1206 //------------------------------inline_array_equals----------------------------
1207 bool LibraryCallKit::inline_array_equals(StrIntrinsicNode::ArgEnc ae) {
1208 assert(ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::LL, "unsupported array types");
1209 Node* arg1 = argument(0);
1210 Node* arg2 = argument(1);
1211
1212 arg1 = access_resolve(arg1, ACCESS_READ);
1213 arg2 = access_resolve(arg2, ACCESS_READ);
1214
1215 const TypeAryPtr* mtype = (ae == StrIntrinsicNode::UU) ? TypeAryPtr::CHARS : TypeAryPtr::BYTES;
1216 set_result(_gvn.transform(new AryEqNode(control(), memory(mtype), arg1, arg2, ae)));
1217 clear_upper_avx();
1218
1219 return true;
1220 }
1221
1222 //------------------------------inline_hasNegatives------------------------------
1223 bool LibraryCallKit::inline_hasNegatives() {
1224 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1225 return false;
1226 }
1227
1228 assert(callee()->signature()->size() == 3, "hasNegatives has 3 parameters");
1229 // no receiver since it is a static method
1230 Node* ba = argument(0);
1231 Node* offset = argument(1);
1232 Node* len = argument(2);
1233
1234 ba = must_be_not_null(ba, true);
1235
1236 // Range checks
1237 generate_string_range_check(ba, offset, len, false);
1238 if (stopped()) {
1239 return true;
1240 }
1241 ba = access_resolve(ba, ACCESS_READ);
1242 Node* ba_start = array_element_address(ba, offset, T_BYTE);
1243 Node* result = new HasNegativesNode(control(), memory(TypeAryPtr::BYTES), ba_start, len);
1244 set_result(_gvn.transform(result));
1245 return true;
1246 }
1247
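//---------------------------inline_preconditions_checkIndex-------------------
// Intrinsic for jdk.internal.util.Preconditions.checkIndex(int, int):
// deoptimize if length is negative or index is not in [0, length), otherwise
// return the index narrowed to the proven non-negative range.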
1248 bool LibraryCallKit::inline_preconditions_checkIndex() {
1249 Node* index = argument(0);
1250 Node* length = argument(1);
1251 if (too_many_traps(Deoptimization::Reason_intrinsic) || too_many_traps(Deoptimization::Reason_range_check)) {
1252 return false;
1253 }
1254
1255 Node* len_pos_cmp = _gvn.transform(new CmpINode(length, intcon(0)));
1256 Node* len_pos_bol = _gvn.transform(new BoolNode(len_pos_cmp, BoolTest::ge));
1257
1258 {
1259 BuildCutout unless(this, len_pos_bol, PROB_MAX);
1260 uncommon_trap(Deoptimization::Reason_intrinsic,
1261 Deoptimization::Action_make_not_entrant);
1262 }
1263
1264 if (stopped()) {
1265 return false;
1266 }
1267
1268 Node* rc_cmp = _gvn.transform(new CmpUNode(index, length));
1269 BoolTest::mask btest = BoolTest::lt;
1270 Node* rc_bool = _gvn.transform(new BoolNode(rc_cmp, btest));
1271 RangeCheckNode* rc = new RangeCheckNode(control(), rc_bool, PROB_MAX, COUNT_UNKNOWN);
1272 _gvn.set_type(rc, rc->Value(&_gvn));
1273 if (!rc_bool->is_Con()) {
1274 record_for_igvn(rc);
1275 }
1276 set_control(_gvn.transform(new IfTrueNode(rc)));
1277 {
1278 PreserveJVMState pjvms(this);
1279 set_control(_gvn.transform(new IfFalseNode(rc)));
1280 uncommon_trap(Deoptimization::Reason_range_check,
1281 Deoptimization::Action_make_not_entrant);
1282 }
1283
1284 if (stopped()) {
1285 return false;
1286 }
1287
1288 Node* result = new CastIINode(index, TypeInt::make(0, _gvn.type(length)->is_int()->_hi, Type::WidenMax));
1289 result->set_req(0, control());
1290 result = _gvn.transform(result);
1291 set_result(result);
1292 replace_in_map(index, result);
1293 clear_upper_avx();
1294 return true;
1295 }
1296
1297 //------------------------------inline_string_indexOf------------------------
1298 bool LibraryCallKit::inline_string_indexOf(StrIntrinsicNode::ArgEnc ae) {
1299 if (!Matcher::match_rule_supported(Op_StrIndexOf)) {
1300 return false;
1301 }
1302 Node* src = argument(0);
1303 Node* tgt = argument(1);
1304
1305 // Make the merge point
1306 RegionNode* result_rgn = new RegionNode(4);
1307 Node* result_phi = new PhiNode(result_rgn, TypeInt::INT);
1308
1309 src = must_be_not_null(src, true);
1310 tgt = must_be_not_null(tgt, true);
1311
1312 src = access_resolve(src, ACCESS_READ);
1313 tgt = access_resolve(tgt, ACCESS_READ);
1314
1315 // Get start addr and length of source string
1316 Node* src_start = array_element_address(src, intcon(0), T_BYTE);
1317 Node* src_count = load_array_length(src);
1318
1319 // Get start addr and length of substring
1320 Node* tgt_start = array_element_address(tgt, intcon(0), T_BYTE);
1321 Node* tgt_count = load_array_length(tgt);
1322
1323 if (ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::UL) {
1324 // Divide src size by 2 if String is UTF16 encoded
1325 src_count = _gvn.transform(new RShiftINode(src_count, intcon(1)));
1326 }
1327 if (ae == StrIntrinsicNode::UU) {
1328 // Divide substring size by 2 if String is UTF16 encoded
1329 tgt_count = _gvn.transform(new RShiftINode(tgt_count, intcon(1)));
1330 }
1331
1332 Node* result = make_indexOf_node(src_start, src_count, tgt_start, tgt_count, result_rgn, result_phi, ae);
1333 if (result != NULL) {
1334 result_phi->init_req(3, result);
1335 result_rgn->init_req(3, control());
1336 }
1337 set_control(_gvn.transform(result_rgn));
1338 record_for_igvn(result_rgn);
1339 set_result(_gvn.transform(result_phi));
1340
1341 return true;
1342 }
1343
1344 //-----------------------------inline_string_indexOfI----------------------
1345 bool LibraryCallKit::inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae) {
1346 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1347 return false;
1348 }
1349 if (!Matcher::match_rule_supported(Op_StrIndexOf)) {
1350 return false;
1351 }
1352 assert(callee()->signature()->size() == 5, "String.indexOf() has 5 arguments");
1353 Node* src = argument(0); // byte[]
1354 Node* src_count = argument(1); // char count
1355 Node* tgt = argument(2); // byte[]
1356 Node* tgt_count = argument(3); // char count
1357 Node* from_index = argument(4); // char index
1358
1359 src = must_be_not_null(src, true);
1360 tgt = must_be_not_null(tgt, true);
1361
1362 src = access_resolve(src, ACCESS_READ);
1363 tgt = access_resolve(tgt, ACCESS_READ);
1364
1365 // Multiply byte array index by 2 if String is UTF16 encoded
1366 Node* src_offset = (ae == StrIntrinsicNode::LL) ? from_index : _gvn.transform(new LShiftINode(from_index, intcon(1)));
1367 src_count = _gvn.transform(new SubINode(src_count, from_index));
1368 Node* src_start = array_element_address(src, src_offset, T_BYTE);
1369 Node* tgt_start = array_element_address(tgt, intcon(0), T_BYTE);
1370
1371 // Range checks
1372 generate_string_range_check(src, src_offset, src_count, ae != StrIntrinsicNode::LL);
1373 generate_string_range_check(tgt, intcon(0), tgt_count, ae == StrIntrinsicNode::UU);
1374 if (stopped()) {
1375 return true;
1376 }
1377
1378 RegionNode* region = new RegionNode(5);
1379 Node* phi = new PhiNode(region, TypeInt::INT);
1380
1381 Node* result = make_indexOf_node(src_start, src_count, tgt_start, tgt_count, region, phi, ae);
1382 if (result != NULL) {
1383 // The result is index relative to from_index if substring was found, -1 otherwise.
1384 // Generate code which will fold into cmove.
1385 Node* cmp = _gvn.transform(new CmpINode(result, intcon(0)));
1386 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::lt));
1387
1388 Node* if_lt = generate_slow_guard(bol, NULL);
1389 if (if_lt != NULL) {
1390 // result == -1
1391 phi->init_req(3, result);
1392 region->init_req(3, if_lt);
1393 }
1394 if (!stopped()) {
1395 result = _gvn.transform(new AddINode(result, from_index));
1396 phi->init_req(4, result);
1397 region->init_req(4, control());
1398 }
1399 }
1400
1401 set_control(_gvn.transform(region));
1402 record_for_igvn(region);
1403 set_result(_gvn.transform(phi));
1404 clear_upper_avx();
1405
1406 return true;
1407 }
1408
1409 // Create StrIndexOfNode with fast path checks
1410 Node* LibraryCallKit::make_indexOf_node(Node* src_start, Node* src_count, Node* tgt_start, Node* tgt_count,
1411 RegionNode* region, Node* phi, StrIntrinsicNode::ArgEnc ae) {
1412 // Check for substr count > string count
1413 Node* cmp = _gvn.transform(new CmpINode(tgt_count, src_count));
1414 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::gt));
1415 Node* if_gt = generate_slow_guard(bol, NULL);
1416 if (if_gt != NULL) {
1417 phi->init_req(1, intcon(-1));
1418 region->init_req(1, if_gt);
1419 }
1420 if (!stopped()) {
1421 // Check for substr count == 0
1422 cmp = _gvn.transform(new CmpINode(tgt_count, intcon(0)));
1423 bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
1424 Node* if_zero = generate_slow_guard(bol, NULL);
1425 if (if_zero != NULL) {
1426 phi->init_req(2, intcon(0));
1427 region->init_req(2, if_zero);
1428 }
1429 }
1430 if (!stopped()) {
1431 return make_string_method_node(Op_StrIndexOf, src_start, src_count, tgt_start, tgt_count, ae);
1432 }
1433 return NULL;
1434 }
1435
1436 //-----------------------------inline_string_indexOfChar-----------------------
1437 bool LibraryCallKit::inline_string_indexOfChar() {
1438 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1439 return false;
1440 }
1441 if (!Matcher::match_rule_supported(Op_StrIndexOfChar)) {
1442 return false;
1443 }
1444 assert(callee()->signature()->size() == 4, "String.indexOfChar() has 4 arguments");
1445 Node* src = argument(0); // byte[]
1446 Node* tgt = argument(1); // tgt is int ch
1447 Node* from_index = argument(2);
1448 Node* max = argument(3);
1449
1450 src = must_be_not_null(src, true);
1451 src = access_resolve(src, ACCESS_READ);
1452
1453 Node* src_offset = _gvn.transform(new LShiftINode(from_index, intcon(1)));
1454 Node* src_start = array_element_address(src, src_offset, T_BYTE);
1455 Node* src_count = _gvn.transform(new SubINode(max, from_index));
1456
1457 // Range checks
1458 generate_string_range_check(src, src_offset, src_count, true);
1459 if (stopped()) {
1460 return true;
1461 }
1462
1463 RegionNode* region = new RegionNode(3);
1464 Node* phi = new PhiNode(region, TypeInt::INT);
1465
1466 Node* result = new StrIndexOfCharNode(control(), memory(TypeAryPtr::BYTES), src_start, src_count, tgt, StrIntrinsicNode::none);
1467 C->set_has_split_ifs(true); // Has chance for split-if optimization
1468 _gvn.transform(result);
1469
1470 Node* cmp = _gvn.transform(new CmpINode(result, intcon(0)));
1471 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::lt));
1472
1473 Node* if_lt = generate_slow_guard(bol, NULL);
1474 if (if_lt != NULL) {
1475 // result == -1
1476 phi->init_req(2, result);
1477 region->init_req(2, if_lt);
1478 }
1479 if (!stopped()) {
1480 result = _gvn.transform(new AddINode(result, from_index));
1481 phi->init_req(1, result);
1482 region->init_req(1, control());
1483 }
1484 set_control(_gvn.transform(region));
1485 record_for_igvn(region);
1486 set_result(_gvn.transform(phi));
1487
1488 return true;
1489 }
1490 //---------------------------inline_string_copy---------------------
1491 // compress == true --> generate a compressed copy operation (compress char[]/byte[] to byte[])
1492 // int StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len)
1493 // int StringUTF16.compress(byte[] src, int srcOff, byte[] dst, int dstOff, int len)
1494 // compress == false --> generate an inflated copy operation (inflate byte[] to char[]/byte[])
1495 // void StringLatin1.inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len)
1496 // void StringLatin1.inflate(byte[] src, int srcOff, byte[] dst, int dstOff, int len)
1497 bool LibraryCallKit::inline_string_copy(bool compress) {
1498 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1499 return false;
1500 }
1501 int nargs = 5; // 2 oops, 3 ints
1502 assert(callee()->signature()->size() == nargs, "string copy has 5 arguments");
1503
1504 Node* src = argument(0);
1505 Node* src_offset = argument(1);
1506 Node* dst = argument(2);
1507 Node* dst_offset = argument(3);
1508 Node* length = argument(4);
1509
1510 // Check for allocation before we add nodes that would confuse
1511 // tightly_coupled_allocation()
1512 AllocateArrayNode* alloc = tightly_coupled_allocation(dst, NULL);
1513
1514 // Figure out the size and type of the elements we will be copying.
1515 const Type* src_type = src->Value(&_gvn);
1516 const Type* dst_type = dst->Value(&_gvn);
1517 BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
1518 BasicType dst_elem = dst_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
1519 assert((compress && dst_elem == T_BYTE && (src_elem == T_BYTE || src_elem == T_CHAR)) ||
1520 (!compress && src_elem == T_BYTE && (dst_elem == T_BYTE || dst_elem == T_CHAR)),
1521 "Unsupported array types for inline_string_copy");
1522
1523 src = must_be_not_null(src, true);
1524 dst = must_be_not_null(dst, true);
1525
1526 // Convert char[] offsets to byte[] offsets
1527 bool convert_src = (compress && src_elem == T_BYTE);
1528 bool convert_dst = (!compress && dst_elem == T_BYTE);
1529 if (convert_src) {
1530 src_offset = _gvn.transform(new LShiftINode(src_offset, intcon(1)));
1531 } else if (convert_dst) {
1532 dst_offset = _gvn.transform(new LShiftINode(dst_offset, intcon(1)));
1533 }
1534
1535 // Range checks
1536 generate_string_range_check(src, src_offset, length, convert_src);
1537 generate_string_range_check(dst, dst_offset, length, convert_dst);
1538 if (stopped()) {
1539 return true;
1540 }
1541
1542 src = access_resolve(src, ACCESS_READ);
1543 dst = access_resolve(dst, ACCESS_WRITE);
1544
1545 Node* src_start = array_element_address(src, src_offset, src_elem);
1546 Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
1547 // 'src_start' points to src array + scaled offset
1548 // 'dst_start' points to dst array + scaled offset
1549 Node* count = NULL;
1550 if (compress) {
1551 count = compress_string(src_start, TypeAryPtr::get_array_body_type(src_elem), dst_start, length);
1552 } else {
1553 inflate_string(src_start, dst_start, TypeAryPtr::get_array_body_type(dst_elem), length);
1554 }
1555
1556 if (alloc != NULL) {
1557 if (alloc->maybe_set_complete(&_gvn)) {
1558 // "You break it, you buy it."
1559 InitializeNode* init = alloc->initialization();
1560 assert(init->is_complete(), "we just did this");
1561 init->set_complete_with_arraycopy();
1562 assert(dst->is_CheckCastPP(), "sanity");
1563 assert(dst->in(0)->in(0) == init, "dest pinned");
1564 }
1565 // Do not let stores that initialize this object be reordered with
1566 // a subsequent store that would make this object accessible by
1567 // other threads.
1568 // Record what AllocateNode this StoreStore protects so that
1569 // escape analysis can go from the MemBarStoreStoreNode to the
1570 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
1571 // based on the escape status of the AllocateNode.
1572 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
1573 }
1574 if (compress) {
1575 set_result(_gvn.transform(count));
1576 }
1577 clear_upper_avx();
1578
1579 return true;
1580 }
1581
1582 #ifdef _LP64
1583 #define XTOP ,top() /*additional argument*/
1584 #else //_LP64
1585 #define XTOP /*no additional argument*/
1586 #endif //_LP64
1587
1588 //------------------------inline_string_toBytesU--------------------------
1589 // public static byte[] StringUTF16.toBytes(char[] value, int off, int len)
1590 bool LibraryCallKit::inline_string_toBytesU() {
1591 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1592 return false;
1593 }
1594 // Get the arguments.
1595 Node* value = argument(0);
1596 Node* offset = argument(1);
1597 Node* length = argument(2);
1598
1599 Node* newcopy = NULL;
1600
1601 // Set the original stack and the reexecute bit for the interpreter to reexecute
1602 // the bytecode that invokes StringUTF16.toBytes() if deoptimization happens.
1603 { PreserveReexecuteState preexecs(this);
1604 jvms()->set_should_reexecute(true);
1605
1606 // Check if a null path was taken unconditionally.
1607 value = null_check(value);
1608
1609 RegionNode* bailout = new RegionNode(1);
1610 record_for_igvn(bailout);
1611
1612 // Range checks
1613 generate_negative_guard(offset, bailout);
1614 generate_negative_guard(length, bailout);
1615 generate_limit_guard(offset, length, load_array_length(value), bailout);
1616 // Make sure that resulting byte[] length does not overflow Integer.MAX_VALUE
1617 generate_limit_guard(length, intcon(0), intcon(max_jint/2), bailout);
1618
1619 if (bailout->req() > 1) {
1620 PreserveJVMState pjvms(this);
1621 set_control(_gvn.transform(bailout));
1622 uncommon_trap(Deoptimization::Reason_intrinsic,
1623 Deoptimization::Action_maybe_recompile);
1624 }
1625 if (stopped()) {
1626 return true;
1627 }
1628
1629 Node* size = _gvn.transform(new LShiftINode(length, intcon(1)));
1630 Node* klass_node = makecon(TypeKlassPtr::make(ciTypeArrayKlass::make(T_BYTE)));
1631 newcopy = new_array(klass_node, size, 0); // no arguments to push
1632 AllocateArrayNode* alloc = tightly_coupled_allocation(newcopy, NULL);
1633
1634 // Calculate starting addresses.
1635 value = access_resolve(value, ACCESS_READ);
1636 Node* src_start = array_element_address(value, offset, T_CHAR);
1637 Node* dst_start = basic_plus_adr(newcopy, arrayOopDesc::base_offset_in_bytes(T_BYTE));
1638
1639 // Check if src array address is aligned to HeapWordSize (dst is always aligned)
1640 const TypeInt* toffset = gvn().type(offset)->is_int();
1641 bool aligned = toffset->is_con() && ((toffset->get_con() * type2aelembytes(T_CHAR)) % HeapWordSize == 0);
1642
1643 // Figure out which arraycopy runtime method to call (disjoint, uninitialized).
1644 const char* copyfunc_name = "arraycopy";
1645 address copyfunc_addr = StubRoutines::select_arraycopy_function(T_CHAR, aligned, true, copyfunc_name, true);
1646 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
1647 OptoRuntime::fast_arraycopy_Type(),
1648 copyfunc_addr, copyfunc_name, TypeRawPtr::BOTTOM,
1649 src_start, dst_start, ConvI2X(length) XTOP);
1650 // Do not let reads from the cloned object float above the arraycopy.
1651 if (alloc != NULL) {
1652 if (alloc->maybe_set_complete(&_gvn)) {
1653 // "You break it, you buy it."
1654 InitializeNode* init = alloc->initialization();
1655 assert(init->is_complete(), "we just did this");
1656 init->set_complete_with_arraycopy();
1657 assert(newcopy->is_CheckCastPP(), "sanity");
1658 assert(newcopy->in(0)->in(0) == init, "dest pinned");
1659 }
1660 // Do not let stores that initialize this object be reordered with
1661 // a subsequent store that would make this object accessible by
1662 // other threads.
1663 // Record what AllocateNode this StoreStore protects so that
1664 // escape analysis can go from the MemBarStoreStoreNode to the
1665 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
1666 // based on the escape status of the AllocateNode.
1667 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
1668 } else {
1669 insert_mem_bar(Op_MemBarCPUOrder);
1670 }
1671 } // original reexecute is set back here
1672
1673 C->set_has_split_ifs(true); // Has chance for split-if optimization
1674 if (!stopped()) {
1675 set_result(newcopy);
1676 }
1677 clear_upper_avx();
1678
1679 return true;
1680 }
1681
1682 //------------------------inline_string_getCharsU--------------------------
1683 // public void StringUTF16.getChars(byte[] src, int srcBegin, int srcEnd, char dst[], int dstBegin)
1684 bool LibraryCallKit::inline_string_getCharsU() {
1685 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
1686 return false;
1687 }
1688
1689 // Get the arguments.
1690 Node* src = argument(0);
1691 Node* src_begin = argument(1);
1692 Node* src_end = argument(2); // exclusive offset (i < src_end)
1693 Node* dst = argument(3);
1694 Node* dst_begin = argument(4);
1695
1696 // Check for allocation before we add nodes that would confuse
1697 // tightly_coupled_allocation()
1698 AllocateArrayNode* alloc = tightly_coupled_allocation(dst, NULL);
1699
1700 // Check if a null path was taken unconditionally.
1701 src = null_check(src);
1702 dst = null_check(dst);
1703 if (stopped()) {
1704 return true;
1705 }
1706
1707 // Get length and convert char[] offset to byte[] offset
1708 Node* length = _gvn.transform(new SubINode(src_end, src_begin));
1709 src_begin = _gvn.transform(new LShiftINode(src_begin, intcon(1)));
1710
1711 // Range checks
1712 generate_string_range_check(src, src_begin, length, true);
1713 generate_string_range_check(dst, dst_begin, length, false);
1714 if (stopped()) {
1715 return true;
1716 }
1717
1718 if (!stopped()) {
1719 src = access_resolve(src, ACCESS_READ);
1720 dst = access_resolve(dst, ACCESS_WRITE);
1721
1722 // Calculate starting addresses.
1723 Node* src_start = array_element_address(src, src_begin, T_BYTE);
1724 Node* dst_start = array_element_address(dst, dst_begin, T_CHAR);
1725
1726 // Check if array addresses are aligned to HeapWordSize
1727 const TypeInt* tsrc = gvn().type(src_begin)->is_int();
1728 const TypeInt* tdst = gvn().type(dst_begin)->is_int();
1729 bool aligned = tsrc->is_con() && ((tsrc->get_con() * type2aelembytes(T_BYTE)) % HeapWordSize == 0) &&
1730 tdst->is_con() && ((tdst->get_con() * type2aelembytes(T_CHAR)) % HeapWordSize == 0);
1731
1732 // Figure out which arraycopy runtime method to call (disjoint, uninitialized).
1733 const char* copyfunc_name = "arraycopy";
1734 address copyfunc_addr = StubRoutines::select_arraycopy_function(T_CHAR, aligned, true, copyfunc_name, true);
1735 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
1736 OptoRuntime::fast_arraycopy_Type(),
1737 copyfunc_addr, copyfunc_name, TypeRawPtr::BOTTOM,
1738 src_start, dst_start, ConvI2X(length) XTOP);
1739 // Do not let reads from the cloned object float above the arraycopy.
1740 if (alloc != NULL) {
1741 if (alloc->maybe_set_complete(&_gvn)) {
1742 // "You break it, you buy it."
1743 InitializeNode* init = alloc->initialization();
1744 assert(init->is_complete(), "we just did this");
1745 init->set_complete_with_arraycopy();
1746 assert(dst->is_CheckCastPP(), "sanity");
1747 assert(dst->in(0)->in(0) == init, "dest pinned");
1748 }
1749 // Do not let stores that initialize this object be reordered with
1750 // a subsequent store that would make this object accessible by
1751 // other threads.
1752 // Record what AllocateNode this StoreStore protects so that
1753 // escape analysis can go from the MemBarStoreStoreNode to the
1754 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
1755 // based on the escape status of the AllocateNode.
1756 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
1757 } else {
1758 insert_mem_bar(Op_MemBarCPUOrder);
1759 }
1760 }
1761
1762 C->set_has_split_ifs(true); // Has chance for split-if optimization
1763 return true;
1764 }
1765
1766 //----------------------inline_string_char_access----------------------------
1767 // Store/Load char to/from byte[] array.
1768 // static void StringUTF16.putChar(byte[] val, int index, int c)
1769 // static char StringUTF16.getChar(byte[] val, int index)
1770 bool LibraryCallKit::inline_string_char_access(bool is_store) {
1771 Node* value = argument(0);
1772 Node* index = argument(1);
1773 Node* ch = is_store ? argument(2) : NULL;
1774
1775 // This intrinsic accesses byte[] array as char[] array. Computing the offsets
1776 // correctly requires matched array shapes.
1777 assert (arrayOopDesc::base_offset_in_bytes(T_CHAR) == arrayOopDesc::base_offset_in_bytes(T_BYTE),
1778 "sanity: byte[] and char[] bases agree");
1779 assert (type2aelembytes(T_CHAR) == type2aelembytes(T_BYTE)*2,
1780 "sanity: byte[] and char[] scales agree");
1781
1782 // Bail out when getChar over constants is requested: constant folding would
1783 // reject folding a mismatched char access over byte[]. Normal inlining of the
1784 // getChar Java method would constant fold nicely instead.
1785 if (!is_store && value->is_Con() && index->is_Con()) {
1786 return false;
1787 }
1788
1789 value = must_be_not_null(value, true);
1790 value = access_resolve(value, is_store ? ACCESS_WRITE : ACCESS_READ);
1791
1792 Node* adr = array_element_address(value, index, T_CHAR);
1793 if (adr->is_top()) {
1794 return false;
1795 }
1796 if (is_store) {
1797 access_store_at(value, adr, TypeAryPtr::BYTES, ch, TypeInt::CHAR, T_CHAR, IN_HEAP | MO_UNORDERED | C2_MISMATCHED);
1798 } else {
1799 ch = access_load_at(value, adr, TypeAryPtr::BYTES, TypeInt::CHAR, T_CHAR, IN_HEAP | MO_UNORDERED | C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD);
1800 set_result(ch);
1801 }
1802 return true;
1803 }
1804
1805 //--------------------------round_double_node--------------------------------
1806 // Round a double node if necessary.
1807 Node* LibraryCallKit::round_double_node(Node* n) {
1808 if (Matcher::strict_fp_requires_explicit_rounding && UseSSE <= 1)
1809 n = _gvn.transform(new RoundDoubleNode(0, n));
1810 return n;
1811 }
1812
1813 //------------------------------inline_math-----------------------------------
1814 // public static double Math.abs(double)
1815 // public static double Math.sqrt(double)
1816 // public static double Math.log(double)
1817 // public static double Math.log10(double)
1818 bool LibraryCallKit::inline_math(vmIntrinsics::ID id) {
1819 Node* arg = round_double_node(argument(0));
1820 Node* n = NULL;
1821 switch (id) {
1822 case vmIntrinsics::_dabs: n = new AbsDNode( arg); break;
1823 case vmIntrinsics::_dsqrt: n = new SqrtDNode(C, control(), arg); break;
1824 default: fatal_unexpected_iid(id); break;
1825 }
1826 set_result(_gvn.transform(n));
1827 return true;
1828 }
1829
1830 //------------------------------runtime_math-----------------------------
1831 bool LibraryCallKit::runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName) {
1832 assert(call_type == OptoRuntime::Math_DD_D_Type() || call_type == OptoRuntime::Math_D_D_Type(),
1833 "must be (DD)D or (D)D type");
1834
1835 // Inputs
1836 Node* a = round_double_node(argument(0));
1837 Node* b = (call_type == OptoRuntime::Math_DD_D_Type()) ? round_double_node(argument(2)) : NULL;
1838
1839 const TypePtr* no_memory_effects = NULL;
1840 Node* trig = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName,
1841 no_memory_effects,
1842 a, top(), b, b ? top() : NULL);
1843 Node* value = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+0));
1844 #ifdef ASSERT
1845 Node* value_top = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+1));
1846 assert(value_top == top(), "second value must be top");
1847 #endif
1848
1849 set_result(value);
1850 return true;
1851 }
1852
1853 //------------------------------inline_math_native-----------------------------
1854 bool LibraryCallKit::inline_math_native(vmIntrinsics::ID id) {
1855 #define FN_PTR(f) CAST_FROM_FN_PTR(address, f)
1856 switch (id) {
1857 // These intrinsics are not properly supported on all hardware
1858 case vmIntrinsics::_dsin:
1859 return StubRoutines::dsin() != NULL ?
1860 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dsin(), "dsin") :
1861 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dsin), "SIN");
1862 case vmIntrinsics::_dcos:
1863 return StubRoutines::dcos() != NULL ?
1864 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dcos(), "dcos") :
1865 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dcos), "COS");
1866 case vmIntrinsics::_dtan:
1867 return StubRoutines::dtan() != NULL ?
1868 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dtan(), "dtan") :
1869 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dtan), "TAN");
1870 case vmIntrinsics::_dlog:
1871 return StubRoutines::dlog() != NULL ?
1872 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dlog(), "dlog") :
1873 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog), "LOG");
1874 case vmIntrinsics::_dlog10:
1875 return StubRoutines::dlog10() != NULL ?
1876 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dlog10(), "dlog10") :
1877 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog10), "LOG10");
1878
1879 // These intrinsics are supported on all hardware
1880 case vmIntrinsics::_dsqrt: return Matcher::match_rule_supported(Op_SqrtD) ? inline_math(id) : false;
1881 case vmIntrinsics::_dabs: return Matcher::has_match_rule(Op_AbsD) ? inline_math(id) : false;
1882
1883 case vmIntrinsics::_dexp:
1884 return StubRoutines::dexp() != NULL ?
1885 runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dexp(), "dexp") :
1886 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dexp), "EXP");
1887 case vmIntrinsics::_dpow: {
1888 Node* exp = round_double_node(argument(2));
1889 const TypeD* d = _gvn.type(exp)->isa_double_constant();
1890 if (d != NULL && d->getd() == 2.0) {
1891 // Special case: pow(x, 2.0) => x * x
1892 Node* base = round_double_node(argument(0));
1893 set_result(_gvn.transform(new MulDNode(base, base)));
1894 return true;
1895 }
1896 return StubRoutines::dpow() != NULL ?
1897 runtime_math(OptoRuntime::Math_DD_D_Type(), StubRoutines::dpow(), "dpow") :
1898 runtime_math(OptoRuntime::Math_DD_D_Type(), FN_PTR(SharedRuntime::dpow), "POW");
1899 }
1900 #undef FN_PTR
1901
1902 // These intrinsics are not yet correctly implemented
1903 case vmIntrinsics::_datan2:
1904 return false;
1905
1906 default:
1907 fatal_unexpected_iid(id);
1908 return false;
1909 }
1910 }
1911
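// Returns true if the node is a "simple" value (a constant, a singleton type,
// a projection such as a parameter or return value, or a phi). Used by
// generate_min_max() to prefer the cheaper of two provably equal answers.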
1912 static bool is_simple_name(Node* n) {
1913 return (n->req() == 1 // constant
1914 || (n->is_Type() && n->as_Type()->type()->singleton())
1915 || n->is_Proj() // parameter or return value
1916 || n->is_Phi() // local of some sort
1917 );
1918 }
1919
1920 //----------------------------inline_notify-----------------------------------*
1921 bool LibraryCallKit::inline_notify(vmIntrinsics::ID id) {
1922 const TypeFunc* ftype = OptoRuntime::monitor_notify_Type();
1923 address func;
1924 if (id == vmIntrinsics::_notify) {
1925 func = OptoRuntime::monitor_notify_Java();
1926 } else {
1927 func = OptoRuntime::monitor_notifyAll_Java();
1928 }
1929 Node* call = make_runtime_call(RC_NO_LEAF, ftype, func, NULL, TypeRawPtr::BOTTOM, argument(0));
1930 make_slow_call_ex(call, env()->Throwable_klass(), false);
1931 return true;
1932 }
1933
1934
1935 //----------------------------inline_min_max-----------------------------------
1936 bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) {
1937 set_result(generate_min_max(id, argument(0), argument(1)));
1938 return true;
1939 }
1940
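//--------------------------inline_math_mathExact------------------------------
// Common tail for the Math.*Exact intrinsics: branch on the overflow check,
// return the math result on the (likely) no-overflow path, and deoptimize with
// reexecution on overflow so the bytecode is re-run and the Java code can
// throw ArithmeticException.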
1941 void LibraryCallKit::inline_math_mathExact(Node* math, Node *test) {
1942 Node* bol = _gvn.transform( new BoolNode(test, BoolTest::overflow) );
1943 IfNode* check = create_and_map_if(control(), bol, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
1944 Node* fast_path = _gvn.transform( new IfFalseNode(check));
1945 Node* slow_path = _gvn.transform( new IfTrueNode(check) );
1946
1947 {
1948 PreserveJVMState pjvms(this);
1949 PreserveReexecuteState preexecs(this);
1950 jvms()->set_should_reexecute(true);
1951
1952 set_control(slow_path);
1953 set_i_o(i_o());
1954
1955 uncommon_trap(Deoptimization::Reason_intrinsic,
1956 Deoptimization::Action_none);
1957 }
1958
1959 set_control(fast_path);
1960 set_result(math);
1961 }
1962
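// Shared helper for the Math.*Exact intrinsics: build the arithmetic node and
// its matching overflow-check node, then let inline_math_mathExact() emit the
// fast path and the deoptimizing slow path.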
1963 template <typename OverflowOp>
1964 bool LibraryCallKit::inline_math_overflow(Node* arg1, Node* arg2) {
1965 typedef typename OverflowOp::MathOp MathOp;
1966
1967 MathOp* mathOp = new MathOp(arg1, arg2);
1968 Node* operation = _gvn.transform( mathOp );
1969 Node* ofcheck = _gvn.transform( new OverflowOp(arg1, arg2) );
1970 inline_math_mathExact(operation, ofcheck);
1971 return true;
1972 }
1973
1974 bool LibraryCallKit::inline_math_addExactI(bool is_increment) {
1975 return inline_math_overflow<OverflowAddINode>(argument(0), is_increment ? intcon(1) : argument(1));
1976 }
1977
1978 bool LibraryCallKit::inline_math_addExactL(bool is_increment) {
1979 return inline_math_overflow<OverflowAddLNode>(argument(0), is_increment ? longcon(1) : argument(2));
1980 }
1981
1982 bool LibraryCallKit::inline_math_subtractExactI(bool is_decrement) {
1983 return inline_math_overflow<OverflowSubINode>(argument(0), is_decrement ? intcon(1) : argument(1));
1984 }
1985
1986 bool LibraryCallKit::inline_math_subtractExactL(bool is_decrement) {
1987 return inline_math_overflow<OverflowSubLNode>(argument(0), is_decrement ? longcon(1) : argument(2));
1988 }
1989
1990 bool LibraryCallKit::inline_math_negateExactI() {
1991 return inline_math_overflow<OverflowSubINode>(intcon(0), argument(0));
1992 }
1993
1994 bool LibraryCallKit::inline_math_negateExactL() {
1995 return inline_math_overflow<OverflowSubLNode>(longcon(0), argument(0));
1996 }
1997
1998 bool LibraryCallKit::inline_math_multiplyExactI() {
1999 return inline_math_overflow<OverflowMulINode>(argument(0), argument(1));
2000 }
2001
2002 bool LibraryCallKit::inline_math_multiplyExactL() {
2003 return inline_math_overflow<OverflowMulLNode>(argument(0), argument(2));
2004 }
2005
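// Math.multiplyHigh(long, long): the high 64 bits of the 128-bit signed product.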
2006 bool LibraryCallKit::inline_math_multiplyHigh() {
2007 set_result(_gvn.transform(new MulHiLNode(argument(0), argument(2))));
2008 return true;
2009 }
2010
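//---------------------------generate_min_max----------------------------------
// Build the value of Math.min/max(x, y). First try to prove the answer from a
// dominating comparison of the two inputs; otherwise emit a flow-free CMove
// guarded by a BoolNode chosen so it is likely to GVN with an existing test.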
2011 Node*
2012 LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) {
2013 // These are the candidate return values:
2014 Node* xvalue = x0;
2015 Node* yvalue = y0;
2016
2017 if (xvalue == yvalue) {
2018 return xvalue;
2019 }
2020
2021 bool want_max = (id == vmIntrinsics::_max);
2022
2023 const TypeInt* txvalue = _gvn.type(xvalue)->isa_int();
2024 const TypeInt* tyvalue = _gvn.type(yvalue)->isa_int();
2025 if (txvalue == NULL || tyvalue == NULL) return top();
2026 // This is not really necessary, but it is consistent with a
2027 // hypothetical MaxINode::Value method:
2028 int widen = MAX2(txvalue->_widen, tyvalue->_widen);
2029
2030 // %%% This folding logic should (ideally) be in a different place.
2031 // Some of it should be inside IfNode, and there should be a more reliable
2032 // transformation of ?: style patterns into cmoves. We also want
2033 // more powerful optimizations around cmove and min/max.
2034
2035 // Try to find a dominating comparison of these guys.
2036 // It can simplify the index computation for Arrays.copyOf
2037 // and similar uses of System.arraycopy.
2038 // First, compute the normalized version of CmpI(x, y).
2039 int cmp_op = Op_CmpI;
2040 Node* xkey = xvalue;
2041 Node* ykey = yvalue;
2042 Node* ideal_cmpxy = _gvn.transform(new CmpINode(xkey, ykey));
2043 if (ideal_cmpxy->is_Cmp()) {
2044 // E.g., if we have CmpI(length - offset, count),
2045 // it might idealize to CmpI(length, count + offset)
2046 cmp_op = ideal_cmpxy->Opcode();
2047 xkey = ideal_cmpxy->in(1);
2048 ykey = ideal_cmpxy->in(2);
2049 }
2050
2051 // Start by locating any relevant comparisons.
2052 Node* start_from = (xkey->outcnt() < ykey->outcnt()) ? xkey : ykey;
2053 Node* cmpxy = NULL;
2054 Node* cmpyx = NULL;
2055 for (DUIterator_Fast kmax, k = start_from->fast_outs(kmax); k < kmax; k++) {
2056 Node* cmp = start_from->fast_out(k);
2057 if (cmp->outcnt() > 0 && // must have prior uses
2058 cmp->in(0) == NULL && // must be context-independent
2059 cmp->Opcode() == cmp_op) { // right kind of compare
2060 if (cmp->in(1) == xkey && cmp->in(2) == ykey) cmpxy = cmp;
2061 if (cmp->in(1) == ykey && cmp->in(2) == xkey) cmpyx = cmp;
2062 }
2063 }
2064
2065 const int NCMPS = 2;
2066 Node* cmps[NCMPS] = { cmpxy, cmpyx };
2067 int cmpn;
2068 for (cmpn = 0; cmpn < NCMPS; cmpn++) {
2069 if (cmps[cmpn] != NULL) break; // find a result
2070 }
2071 if (cmpn < NCMPS) {
2072 // Look for a dominating test that tells us the min and max.
2073 int depth = 0; // Limit search depth for speed
2074 Node* dom = control();
2075 for (; dom != NULL; dom = IfNode::up_one_dom(dom, true)) {
2076 if (++depth >= 100) break;
2077 Node* ifproj = dom;
2078 if (!ifproj->is_Proj()) continue;
2079 Node* iff = ifproj->in(0);
2080 if (!iff->is_If()) continue;
2081 Node* bol = iff->in(1);
2082 if (!bol->is_Bool()) continue;
2083 Node* cmp = bol->in(1);
2084 if (cmp == NULL) continue;
2085 for (cmpn = 0; cmpn < NCMPS; cmpn++)
2086 if (cmps[cmpn] == cmp) break;
2087 if (cmpn == NCMPS) continue;
2088 BoolTest::mask btest = bol->as_Bool()->_test._test;
2089 if (ifproj->is_IfFalse()) btest = BoolTest(btest).negate();
2090 if (cmp->in(1) == ykey) btest = BoolTest(btest).commute();
2091 // At this point, we know that 'x btest y' is true.
2092 switch (btest) {
2093 case BoolTest::eq:
2094 // They are proven equal, so we can collapse the min/max.
2095 // Either value is the answer. Choose the simpler.
2096 if (is_simple_name(yvalue) && !is_simple_name(xvalue))
2097 return yvalue;
2098 return xvalue;
2099 case BoolTest::lt: // x < y
2100 case BoolTest::le: // x <= y
2101 return (want_max ? yvalue : xvalue);
2102 case BoolTest::gt: // x > y
2103 case BoolTest::ge: // x >= y
2104 return (want_max ? xvalue : yvalue);
2105 default:
2106 break;
2107 }
2108 }
2109 }
2110
2111 // We failed to find a dominating test.
2112 // Let's pick a test that might GVN with prior tests.
2113 Node* best_bol = NULL;
2114 BoolTest::mask best_btest = BoolTest::illegal;
2115 for (cmpn = 0; cmpn < NCMPS; cmpn++) {
2116 Node* cmp = cmps[cmpn];
2117 if (cmp == NULL) continue;
2118 for (DUIterator_Fast jmax, j = cmp->fast_outs(jmax); j < jmax; j++) {
2119 Node* bol = cmp->fast_out(j);
2120 if (!bol->is_Bool()) continue;
2121 BoolTest::mask btest = bol->as_Bool()->_test._test;
2122 if (btest == BoolTest::eq || btest == BoolTest::ne) continue;
2123 if (cmp->in(1) == ykey) btest = BoolTest(btest).commute();
2124 if (bol->outcnt() > (best_bol == NULL ? 0 : best_bol->outcnt())) {
2125 best_bol = bol->as_Bool();
2126 best_btest = btest;
2127 }
2128 }
2129 }
2130
2131 Node* answer_if_true = NULL;
2132 Node* answer_if_false = NULL;
2133 switch (best_btest) {
2134 default:
2135 if (cmpxy == NULL)
2136 cmpxy = ideal_cmpxy;
2137 best_bol = _gvn.transform(new BoolNode(cmpxy, BoolTest::lt));
2138 // and fall through:
2139 case BoolTest::lt: // x < y
2140 case BoolTest::le: // x <= y
2141 answer_if_true = (want_max ? yvalue : xvalue);
2142 answer_if_false = (want_max ? xvalue : yvalue);
2143 break;
2144 case BoolTest::gt: // x > y
2145 case BoolTest::ge: // x >= y
2146 answer_if_true = (want_max ? xvalue : yvalue);
2147 answer_if_false = (want_max ? yvalue : xvalue);
2148 break;
2149 }
2150
2151 jint hi, lo;
2152 if (want_max) {
2153 // We can sharpen the minimum.
2154 hi = MAX2(txvalue->_hi, tyvalue->_hi);
2155 lo = MAX2(txvalue->_lo, tyvalue->_lo);
2156 } else {
2157 // We can sharpen the maximum.
2158 hi = MIN2(txvalue->_hi, tyvalue->_hi);
2159 lo = MIN2(txvalue->_lo, tyvalue->_lo);
2160 }
2161
2162 // Use a flow-free graph structure, to avoid creating excess control edges
2163 // which could hinder other optimizations.
2164 // Since Math.min/max is often used with arraycopy, we want
2165 // tightly_coupled_allocation to be able to see beyond min/max expressions.
2166 Node* cmov = CMoveNode::make(NULL, best_bol,
2167 answer_if_false, answer_if_true,
2168 TypeInt::make(lo, hi, widen));
2169
2170 return _gvn.transform(cmov);
2171
2172 /*
2173 // This is not as desirable as it may seem, since Min and Max
2174 // nodes do not have a full set of optimizations.
2175 // And they would interfere, anyway, with 'if' optimizations
2176 // and with CMoveI canonical forms.
2177 switch (id) {
2178 case vmIntrinsics::_min:
2179 result_val = _gvn.transform(new (C, 3) MinINode(x,y)); break;
2180 case vmIntrinsics::_max:
2181 result_val = _gvn.transform(new (C, 3) MaxINode(x,y)); break;
2182 default:
2183 ShouldNotReachHere();
2184 }
2185 */
2186 }
2187
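//--------------------------classify_unsafe_addr-------------------------------
// Classify an Unsafe (base, offset) address as Type::RawPtr (off-heap),
// Type::OopPtr (known on-heap) or Type::AnyPtr (unknown). For the NULL+long
// form the base is rewritten in place to a raw pointer built from the offset.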
2188 inline int
2189 LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset, BasicType type) {
2190 const TypePtr* base_type = TypePtr::NULL_PTR;
2191 if (base != NULL) base_type = _gvn.type(base)->isa_ptr();
2192 if (base_type == NULL) {
2193 // Unknown type.
2194 return Type::AnyPtr;
2195 } else if (base_type == TypePtr::NULL_PTR) {
2196 // Since this is a NULL+long form, we have to switch to a rawptr.
2197 base = _gvn.transform(new CastX2PNode(offset));
2198 offset = MakeConX(0);
2199 return Type::RawPtr;
2200 } else if (base_type->base() == Type::RawPtr) {
2201 return Type::RawPtr;
2202 } else if (base_type->isa_oopptr()) {
2203 // Base is never null => always a heap address.
2204 if (!TypePtr::NULL_PTR->higher_equal(base_type)) {
2205 return Type::OopPtr;
2206 }
2207 // Offset is small => always a heap address.
2208 const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t();
2209 if (offset_type != NULL &&
2210 base_type->offset() == 0 && // (should always be?)
2211 offset_type->_lo >= 0 &&
2212 !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) {
2213 return Type::OopPtr;
2214 } else if (type == T_OBJECT) {
2215 // An off-heap access to an oop doesn't make any sense; it has to be
2216 // on heap.
2217 return Type::OopPtr;
2218 }
2219 // Otherwise, it might either be oop+off or NULL+addr.
2220 return Type::AnyPtr;
2221 } else {
2222 // No information:
2223 return Type::AnyPtr;
2224 }
2225 }
2226
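//--------------------------make_unsafe_address--------------------------------
// Build the address node for an Unsafe access. Raw and oop bases get the
// obvious address; for the unknown (AnyPtr) case, speculative profile data may
// let us cast the base to not-null (on-heap) or assert it null (off-heap),
// otherwise we fall back to a raw memory access.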
2227 inline Node* LibraryCallKit::make_unsafe_address(Node*& base, Node* offset, DecoratorSet decorators, BasicType type, bool can_cast) {
2228 Node* uncasted_base = base;
2229 int kind = classify_unsafe_addr(uncasted_base, offset, type);
2230 if (kind == Type::RawPtr) {
2231 return basic_plus_adr(top(), uncasted_base, offset);
2232 } else if (kind == Type::AnyPtr) {
2233 assert(base == uncasted_base, "unexpected base change");
2234 if (can_cast) {
2235 if (!_gvn.type(base)->speculative_maybe_null() &&
2236 !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
2237 // According to profiling, this access is always on
2238 // heap. Casting the base to not null and thus avoiding membars
2239 // around the access should allow better optimizations
2240 Node* null_ctl = top();
2241 base = null_check_oop(base, &null_ctl, true, true, true);
2242 assert(null_ctl->is_top(), "no null control here");
2243 return basic_plus_adr(base, offset);
2244 } else if (_gvn.type(base)->speculative_always_null() &&
2245 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
2246 // According to profiling, this access is always off
2247 // heap.
2248 base = null_assert(base);
2249 Node* raw_base = _gvn.transform(new CastX2PNode(offset));
2250 offset = MakeConX(0);
2251 return basic_plus_adr(top(), raw_base, offset);
2252 }
2253 }
2254 // We don't know if it's an on-heap or off-heap access. Fall back
2255 // to raw memory access.
2256 base = access_resolve(base, decorators);
2257 Node* raw = _gvn.transform(new CheckCastPPNode(control(), base, TypeRawPtr::BOTTOM));
2258 return basic_plus_adr(top(), raw, offset);
2259 } else {
2260 assert(base == uncasted_base, "unexpected base change");
2261 // We know it's an on-heap access, so base can't be null
2262 if (TypePtr::NULL_PTR->higher_equal(_gvn.type(base))) {
2263 base = must_be_not_null(base, true);
2264 }
2265 return basic_plus_adr(base, offset);
2266 }
2267 }
2268
2269 //--------------------------inline_number_methods-----------------------------
2270 // inline int Integer.numberOfLeadingZeros(int)
2271 // inline int Long.numberOfLeadingZeros(long)
2272 //
2273 // inline int Integer.numberOfTrailingZeros(int)
2274 // inline int Long.numberOfTrailingZeros(long)
2275 //
2276 // inline int Integer.bitCount(int)
2277 // inline int Long.bitCount(long)
2278 //
2279 // inline char Character.reverseBytes(char)
2280 // inline short Short.reverseBytes(short)
2281 // inline int Integer.reverseBytes(int)
2282 // inline long Long.reverseBytes(long)
2283 bool LibraryCallKit::inline_number_methods(vmIntrinsics::ID id) {
2284 Node* arg = argument(0);
2285 Node* n = NULL;
2286 switch (id) {
2287 case vmIntrinsics::_numberOfLeadingZeros_i: n = new CountLeadingZerosINode( arg); break;
2288 case vmIntrinsics::_numberOfLeadingZeros_l: n = new CountLeadingZerosLNode( arg); break;
2289 case vmIntrinsics::_numberOfTrailingZeros_i: n = new CountTrailingZerosINode(arg); break;
2290 case vmIntrinsics::_numberOfTrailingZeros_l: n = new CountTrailingZerosLNode(arg); break;
2291 case vmIntrinsics::_bitCount_i: n = new PopCountINode( arg); break;
2292 case vmIntrinsics::_bitCount_l: n = new PopCountLNode( arg); break;
2293 case vmIntrinsics::_reverseBytes_c: n = new ReverseBytesUSNode(0, arg); break;
2294 case vmIntrinsics::_reverseBytes_s: n = new ReverseBytesSNode( 0, arg); break;
2295 case vmIntrinsics::_reverseBytes_i: n = new ReverseBytesINode( 0, arg); break;
2296 case vmIntrinsics::_reverseBytes_l: n = new ReverseBytesLNode( 0, arg); break;
2297 default: fatal_unexpected_iid(id); break;
2298 }
2299 set_result(_gvn.transform(n));
2300 return true;
2301 }
2302
2303 //----------------------------inline_unsafe_access----------------------------
2304
2305 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) {
2306 // Attempt to infer a sharper value type from the offset and base type.
2307 ciKlass* sharpened_klass = NULL;
2308
2309 // See if it is an instance field, with an object type.
2310 if (alias_type->field() != NULL) {
2311 if (alias_type->field()->type()->is_klass()) {
2312 sharpened_klass = alias_type->field()->type()->as_klass();
2313 }
2314 }
2315
2316 // See if it is a narrow oop array.
2317 if (adr_type->isa_aryptr()) {
2318 if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2319 const TypeOopPtr *elem_type = adr_type->is_aryptr()->elem()->isa_oopptr();
2320 if (elem_type != NULL) {
2321 sharpened_klass = elem_type->klass();
2322 }
2323 }
2324 }
2325
2326 // The sharpened class might be unloaded if there is no class loader
2327 // constraint in place.
2328 if (sharpened_klass != NULL && sharpened_klass->is_loaded()) {
2329 const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
2330
2331 #ifndef PRODUCT
2332 if (C->print_intrinsics() || C->print_inlining()) {
2333 tty->print(" from base type: "); adr_type->dump(); tty->cr();
2334 tty->print(" sharpened value: "); tjp->dump(); tty->cr();
2335 }
2336 #endif
2337 // Sharpen the value type.
2338 return tjp;
2339 }
2340 return NULL;
2341 }
2342
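// Map an Unsafe access kind (Relaxed, Opaque, Acquire, Release, Volatile) to
// the corresponding C2 memory ordering decorator.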
2343 DecoratorSet LibraryCallKit::mo_decorator_for_access_kind(AccessKind kind) {
2344 switch (kind) {
2345 case Relaxed:
2346 return MO_UNORDERED;
2347 case Opaque:
2348 return MO_RELAXED;
2349 case Acquire:
2350 return MO_ACQUIRE;
2351 case Release:
2352 return MO_RELEASE;
2353 case Volatile:
2354 return MO_SEQ_CST;
2355 default:
2356 ShouldNotReachHere();
2357 return 0;
2358 }
2359 }
2360
2361 bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, const AccessKind kind, const bool unaligned) {
2362 if (callee()->is_static()) return false; // caller must have the capability!
2363 DecoratorSet decorators = C2_UNSAFE_ACCESS;
2364 guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
2365 guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
2366 assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
2367
2368 if (type == T_OBJECT || type == T_ARRAY) {
2369 decorators |= ON_UNKNOWN_OOP_REF;
2370 }
2371
2372 if (unaligned) {
2373 decorators |= C2_UNALIGNED;
2374 }
2375
2376 #ifndef PRODUCT
2377 {
2378 ResourceMark rm;
2379 // Check the signatures.
2380 ciSignature* sig = callee()->signature();
2381 #ifdef ASSERT
2382 if (!is_store) {
2383 // Object getReference(Object base, int/long offset), etc.
2384 BasicType rtype = sig->return_type()->basic_type();
2385 assert(rtype == type || (rtype == T_OBJECT && type == T_VALUETYPE), "getter must return the expected value");
2386 assert(sig->count() == 2 || (type == T_VALUETYPE && sig->count() == 3), "oop getter has 2 or 3 arguments");
2387 assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2388 assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2389 } else {
2390 // void putReference(Object base, int/long offset, Object x), etc.
2391 assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2392 assert(sig->count() == 3 || (type == T_VALUETYPE && sig->count() == 4), "oop putter has 3 arguments");
2393 assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2394 assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2395 BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2396 assert(vtype == type || (type == T_VALUETYPE && vtype == T_OBJECT), "putter must accept the expected value");
2397 }
2398 #endif // ASSERT
2399 }
2400 #endif //PRODUCT
2401
2402 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2403
2404 Node* receiver = argument(0); // type: oop
2405
2406 // Build address expression.
2407 Node* adr;
2408 Node* heap_base_oop = top();
2409 Node* offset = top();
2410 Node* val;
2411
2412 // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2413 Node* base = argument(1); // type: oop
2414 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2415 offset = argument(2); // type: long
2416 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2417 // to be plain byte offsets, which are also the same as those accepted
2418 // by oopDesc::field_addr.
2419 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2420 "fieldOffset must be byte-scaled");
2421
2422 ciValueKlass* value_klass = NULL;
2423 if (type == T_VALUETYPE) {
2424 Node* cls = null_check(argument(4));
2425 if (stopped()) {
2426 return true;
2427 }
2428 Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
2429 const TypeKlassPtr* kls_t = _gvn.type(kls)->isa_klassptr();
2430 if (!kls_t->klass_is_exact()) {
2431 return false;
2432 }
2433 ciKlass* klass = kls_t->klass();
2434 if (!klass->is_valuetype()) {
2435 return false;
2436 }
2437 value_klass = klass->as_value_klass();
2438 }
2439
2440 receiver = null_check(receiver);
2441 if (stopped()) {
2442 return true;
2443 }
2444
2445 if (base->is_ValueType()) {
2446 ValueTypeNode* vt = base->as_ValueType();
2447
2448 if (is_store) {
2449 if (!vt->is_allocated(&_gvn) || !_gvn.type(vt)->is_valuetype()->larval()) {
2450 return false;
2451 }
2452 base = vt->get_oop();
2453 } else {
2454 if (offset->is_Con()) {
2455 long off = find_long_con(offset, 0);
2456 ciValueKlass* vk = _gvn.type(vt)->is_valuetype()->value_klass();
2457 if ((long)(int)off != off || !vk->contains_field_offset(off)) {
2458 return false;
2459 }
2460
2461 ciField* f = vk->get_non_flattened_field_by_offset((int)off);
2462
2463 if (f != NULL) {
2464 BasicType bt = f->layout_type();
2465 if (bt == T_ARRAY || bt == T_NARROWOOP) {
2466 bt = T_OBJECT;
2467 }
2468 if (bt == type) {
2469 if (bt != T_VALUETYPE || f->type() == value_klass) {
2470 set_result(vt->field_value_by_offset((int)off, false));
2471 return true;
2472 }
2473 }
2474 }
2475 }
2476 vt = vt->allocate(this)->as_ValueType();
2477 base = vt->get_oop();
2478 }
2479 }
2480
2481 // 32-bit machines ignore the high half!
2482 offset = ConvL2X(offset);
2483 adr = make_unsafe_address(base, offset, is_store ? ACCESS_WRITE : ACCESS_READ, type, kind == Relaxed);
2484
2485 if (_gvn.type(base)->isa_ptr() != TypePtr::NULL_PTR) {
2486 heap_base_oop = base;
2487 } else if (type == T_OBJECT || (value_klass != NULL && value_klass->has_object_fields())) {
2488 return false; // off-heap oop accesses are not supported
2489 }
2490
2491 // Can base be NULL? Otherwise, always on-heap access.
2492 bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop));
2493
2494 if (!can_access_non_heap) {
2495 decorators |= IN_HEAP;
2496 }
2497
2498 val = is_store ? argument(4 + (type == T_VALUETYPE ? 1 : 0)) : NULL;
2499
2500 const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2501
2502 // Try to categorize the address.
2503 Compile::AliasType* alias_type = C->alias_type(adr_type);
2504 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2505
2506 if (alias_type->adr_type() == TypeInstPtr::KLASS ||
2507 alias_type->adr_type() == TypeAryPtr::RANGE) {
2508 return false; // not supported
2509 }
2510
2511 bool mismatched = false;
2512 BasicType bt = T_ILLEGAL;
2513 ciField* field = NULL;
2514 if (adr_type->isa_instptr()) {
2515 const TypeInstPtr* instptr = adr_type->is_instptr();
2516 ciInstanceKlass* k = instptr->klass()->as_instance_klass();
2517 int off = instptr->offset();
2518 if (instptr->const_oop() != NULL &&
2519 instptr->klass() == ciEnv::current()->Class_klass() &&
2520 instptr->offset() >= (instptr->klass()->as_instance_klass()->size_helper() * wordSize)) {
2521 k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
2522 field = k->get_field_by_offset(off, true);
2523 } else {
2524 field = k->get_non_flattened_field_by_offset(off);
2525 }
2526 if (field != NULL) {
2527 bt = field->layout_type();
2528 }
2529 assert(bt == alias_type->basic_type() || bt == T_VALUETYPE, "should match");
2530 if (field != NULL && bt == T_VALUETYPE && !field->is_flattened()) {
2531 bt = T_OBJECT;
2532 }
2533 } else {
2534 bt = alias_type->basic_type();
2535 }
2536
2537 if (bt != T_ILLEGAL) {
2538 assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
2539 if (bt == T_BYTE && adr_type->isa_aryptr()) {
2540 // Alias type doesn't differentiate between byte[] and boolean[].
2541 // Use address type to get the element type.
2542 bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
2543 }
2544 if (bt == T_ARRAY || bt == T_NARROWOOP) {
2545 // accessing an array field with getReference is not a mismatch
2546 bt = T_OBJECT;
2547 }
2548 if ((bt == T_OBJECT) != (type == T_OBJECT)) {
2549 // Don't intrinsify mismatched object accesses
2550 return false;
2551 }
2552 mismatched = (bt != type);
2553 } else if (alias_type->adr_type()->isa_oopptr()) {
2554 mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
2555 }
2556
2557 if (type == T_VALUETYPE) {
2558 if (adr_type->isa_instptr()) {
2559 if (field == NULL || field->type() != value_klass) {
2560 mismatched = true;
2561 }
2562 } else if (adr_type->isa_aryptr()) {
2563 const Type* elem = adr_type->is_aryptr()->elem();
2564 if (!elem->isa_valuetype()) {
2565 mismatched = true;
2566 } else if (elem->is_valuetype()->value_klass() != value_klass) {
2567 mismatched = true;
2568 }
2569 }
2570 if (is_store) {
2571 const Type* val_t = _gvn.type(val);
2572 if (!val_t->isa_valuetype() ||
2573 val_t->is_valuetype()->value_klass() != value_klass) {
2574 return false;
2575 }
2576 }
2577 }
2578
2579 assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
2580
2581 if (mismatched) {
2582 decorators |= C2_MISMATCHED;
2583 }
2584
2585 // First guess at the value type.
2586 const Type *value_type = Type::get_const_basic_type(type);
2587
2588 // Figure out the memory ordering.
2589 decorators |= mo_decorator_for_access_kind(kind);
2590
2591 if (!is_store) {
2592 if (type == T_OBJECT) {
2593 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2594 if (tjp != NULL) {
2595 value_type = tjp;
2596 }
2597 } else if (type == T_VALUETYPE) {
2598 value_type = NULL;
2599 }
2600 }
2601
2602 // Heap pointers get a null-check from the interpreter,
2603 // as a courtesy. However, this is not guaranteed by Unsafe,
2604 // and it is not possible to fully distinguish unintended nulls
2605 // from intended ones in this API.
2606
2607 if (!is_store) {
2608 Node* p = NULL;
2609 // Try to constant fold a load from a constant field
2610
2611 if (heap_base_oop != top() && field != NULL && field->is_constant() && !mismatched) {
2612 // final or stable field
2613 p = make_constant_from_field(field, heap_base_oop);
2614 }
2615
2616 if (p == NULL) { // Could not constant fold the load
2617 if (type == T_VALUETYPE) {
2618 if (adr_type->isa_instptr() && !mismatched) {
2619 ciInstanceKlass* holder = adr_type->is_instptr()->klass()->as_instance_klass();
2620 int offset = adr_type->is_instptr()->offset();
2621 p = ValueTypeNode::make_from_flattened(this, value_klass, base, base, holder, offset, decorators);
2622 } else {
2623 p = ValueTypeNode::make_from_flattened(this, value_klass, base, adr, NULL, 0, decorators);
2624 }
2625 } else {
2626 p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
2627 }
2628 // Normalize the value returned by getBoolean in the following cases
2629 if (type == T_BOOLEAN &&
2630 (mismatched ||
2631 heap_base_oop == top() || // - heap_base_oop is NULL or
2632 (can_access_non_heap && field == NULL)) // - heap_base_oop is potentially NULL
        //   and the unsafe access is made at a large offset
        //   (i.e., larger than the maximum offset necessary for any
        //   field access)
2636 ) {
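        // i.e., normalized_result = (p == 0) ? 0 : 1; any non-zero byte loaded
        // through Unsafe is collapsed into the canonical 'true' value.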
2637 IdealKit ideal = IdealKit(this);
2638 #define __ ideal.
2639 IdealVariable normalized_result(ideal);
2640 __ declarations_done();
2641 __ set(normalized_result, p);
2642 __ if_then(p, BoolTest::ne, ideal.ConI(0));
2643 __ set(normalized_result, ideal.ConI(1));
2644 ideal.end_if();
2645 final_sync(ideal);
2646 p = __ value(normalized_result);
2647 #undef __
2648 }
2649 }
2650 if (type == T_ADDRESS) {
2651 p = gvn().transform(new CastP2XNode(NULL, p));
2652 p = ConvX2UL(p);
2653 }
    if (field != NULL && field->is_flattenable() && !field->is_flattened()) {
2655 // Load a non-flattened but flattenable value type from memory
2656 if (value_type->value_klass()->is_scalarizable()) {
2657 p = ValueTypeNode::make_from_oop(this, p, value_type->value_klass());
2658 } else {
2659 p = null2default(p, value_type->value_klass());
2660 }
2661 }
2662 // The load node has the control of the preceding MemBarCPUOrder. All
2663 // following nodes will have the control of the MemBarCPUOrder inserted at
2664 // the end of this method. So, pushing the load onto the stack at a later
2665 // point is fine.
2666 set_result(p);
2667 } else {
2668 if (bt == T_ADDRESS) {
2669 // Repackage the long as a pointer.
2670 val = ConvL2X(val);
2671 val = gvn().transform(new CastX2PNode(val));
2672 }
2673 if (type == T_VALUETYPE) {
2674 if (adr_type->isa_instptr() && !mismatched) {
2675 ciInstanceKlass* holder = adr_type->is_instptr()->klass()->as_instance_klass();
2676 int offset = adr_type->is_instptr()->offset();
2677 val->as_ValueType()->store_flattened(this, base, base, holder, offset, decorators);
2678 } else {
2679 val->as_ValueType()->store_flattened(this, base, adr, NULL, 0, decorators);
2680 }
2681 } else {
2682 access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
2683 }
2684 }
2685
2686 if (argument(1)->is_ValueType() && is_store) {
2687 Node* value = ValueTypeNode::make_from_oop(this, base, _gvn.type(base)->value_klass());
2688 value = value->as_ValueType()->make_larval(this, false);
2689 replace_in_map(argument(1), value);
2690 }
2691
2692 return true;
2693 }
2694
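//----------------------inline_unsafe_make_private_buffer----------------------
// Null-checks the receiver and, if the value argument is a ValueTypeNode,
// returns it in larval (private buffer) form; otherwise the intrinsic bails out.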
2695 bool LibraryCallKit::inline_unsafe_make_private_buffer() {
2696 Node* receiver = argument(0);
2697 Node* value = argument(1);
2698
2699 receiver = null_check(receiver);
2700 if (stopped()) {
2701 return true;
2702 }
2703
2704 if (!value->is_ValueType()) {
2705 return false;
2706 }
2707
2708 set_result(value->as_ValueType()->make_larval(this, true));
2709
2710 return true;
2711 }
2712
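//---------------------inline_unsafe_finish_private_buffer---------------------
// Null-checks the receiver and requires the buffer argument to be an allocated
// larval ValueTypeNode; the larval is finished and returned as the result.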
2713 bool LibraryCallKit::inline_unsafe_finish_private_buffer() {
2714 Node* receiver = argument(0);
2715 Node* buffer = argument(1);
2716
2717 receiver = null_check(receiver);
2718 if (stopped()) {
2719 return true;
2720 }
2721
2722 if (!buffer->is_ValueType()) {
2723 return false;
2724 }
2725
2726 ValueTypeNode* vt = buffer->as_ValueType();
2727 if (!vt->is_allocated(&_gvn) || !_gvn.type(vt)->is_valuetype()->larval()) {
2728 return false;
2729 }
2730
2731 set_result(vt->finish_larval(this));
2732
2733 return true;
2734 }
2735
2736 //----------------------------inline_unsafe_load_store----------------------------
2737 // This method serves a couple of different customers (depending on LoadStoreKind):
2738 //
2739 // LS_cmp_swap:
2740 //
2741 // boolean compareAndSetReference(Object o, long offset, Object expected, Object x);
2742 // boolean compareAndSetInt( Object o, long offset, int expected, int x);
2743 // boolean compareAndSetLong( Object o, long offset, long expected, long x);
2744 //
2745 // LS_cmp_swap_weak:
2746 //
2747 // boolean weakCompareAndSetReference( Object o, long offset, Object expected, Object x);
2748 // boolean weakCompareAndSetReferencePlain( Object o, long offset, Object expected, Object x);
2749 // boolean weakCompareAndSetReferenceAcquire(Object o, long offset, Object expected, Object x);
2750 // boolean weakCompareAndSetReferenceRelease(Object o, long offset, Object expected, Object x);
2751 //
2752 // boolean weakCompareAndSetInt( Object o, long offset, int expected, int x);
2753 // boolean weakCompareAndSetIntPlain( Object o, long offset, int expected, int x);
2754 // boolean weakCompareAndSetIntAcquire( Object o, long offset, int expected, int x);
2755 // boolean weakCompareAndSetIntRelease( Object o, long offset, int expected, int x);
2756 //
2757 // boolean weakCompareAndSetLong( Object o, long offset, long expected, long x);
2758 // boolean weakCompareAndSetLongPlain( Object o, long offset, long expected, long x);
2759 // boolean weakCompareAndSetLongAcquire( Object o, long offset, long expected, long x);
2760 // boolean weakCompareAndSetLongRelease( Object o, long offset, long expected, long x);
2761 //
2762 // LS_cmp_exchange:
2763 //
2764 // Object compareAndExchangeReferenceVolatile(Object o, long offset, Object expected, Object x);
2765 // Object compareAndExchangeReferenceAcquire( Object o, long offset, Object expected, Object x);
2766 // Object compareAndExchangeReferenceRelease( Object o, long offset, Object expected, Object x);
2767 //
2768 // Object compareAndExchangeIntVolatile( Object o, long offset, Object expected, Object x);
2769 // Object compareAndExchangeIntAcquire( Object o, long offset, Object expected, Object x);
2770 // Object compareAndExchangeIntRelease( Object o, long offset, Object expected, Object x);
2771 //
2772 // Object compareAndExchangeLongVolatile( Object o, long offset, Object expected, Object x);
2773 // Object compareAndExchangeLongAcquire( Object o, long offset, Object expected, Object x);
2774 // Object compareAndExchangeLongRelease( Object o, long offset, Object expected, Object x);
2775 //
2776 // LS_get_add:
2777 //
2778 // int getAndAddInt( Object o, long offset, int delta)
2779 // long getAndAddLong(Object o, long offset, long delta)
2780 //
2781 // LS_get_set:
2782 //
2783 // int getAndSet(Object o, long offset, int newValue)
2784 // long getAndSet(Object o, long offset, long newValue)
2785 // Object getAndSet(Object o, long offset, Object newValue)
2786 //
2787 bool LibraryCallKit::inline_unsafe_load_store(const BasicType type, const LoadStoreKind kind, const AccessKind access_kind) {
2788 // This basic scheme here is the same as inline_unsafe_access, but
2789 // differs in enough details that combining them would make the code
2790 // overly confusing. (This is a true fact! I originally combined
2791 // them, but even I was confused by it!) As much code/comments as
2792 // possible are retained from inline_unsafe_access though to make
2793 // the correspondences clearer. - dl
2794
2795 if (callee()->is_static()) return false; // caller must have the capability!
2796
2797 DecoratorSet decorators = C2_UNSAFE_ACCESS;
2798 decorators |= mo_decorator_for_access_kind(access_kind);
2799
2800 #ifndef PRODUCT
2801 BasicType rtype;
2802 {
2803 ResourceMark rm;
2804 // Check the signatures.
2805 ciSignature* sig = callee()->signature();
2806 rtype = sig->return_type()->basic_type();
2807 switch(kind) {
2808 case LS_get_add:
2809 case LS_get_set: {
2810 // Check the signatures.
2811 #ifdef ASSERT
2812 assert(rtype == type, "get and set must return the expected type");
2813 assert(sig->count() == 3, "get and set has 3 arguments");
2814 assert(sig->type_at(0)->basic_type() == T_OBJECT, "get and set base is object");
2815 assert(sig->type_at(1)->basic_type() == T_LONG, "get and set offset is long");
2816 assert(sig->type_at(2)->basic_type() == type, "get and set must take expected type as new value/delta");
2817 assert(access_kind == Volatile, "mo is not passed to intrinsic nodes in current implementation");
2818 #endif // ASSERT
2819 break;
2820 }
2821 case LS_cmp_swap:
2822 case LS_cmp_swap_weak: {
2823 // Check the signatures.
2824 #ifdef ASSERT
2825 assert(rtype == T_BOOLEAN, "CAS must return boolean");
2826 assert(sig->count() == 4, "CAS has 4 arguments");
2827 assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
2828 assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
2829 #endif // ASSERT
2830 break;
2831 }
2832 case LS_cmp_exchange: {
2833 // Check the signatures.
2834 #ifdef ASSERT
2835 assert(rtype == type, "CAS must return the expected type");
2836 assert(sig->count() == 4, "CAS has 4 arguments");
2837 assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
2838 assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
2839 #endif // ASSERT
2840 break;
2841 }
2842 default:
2843 ShouldNotReachHere();
2844 }
2845 }
2846 #endif //PRODUCT
2847
2848 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2849
2850 // Get arguments:
2851 Node* receiver = NULL;
2852 Node* base = NULL;
2853 Node* offset = NULL;
2854 Node* oldval = NULL;
2855 Node* newval = NULL;
2856 switch(kind) {
2857 case LS_cmp_swap:
2858 case LS_cmp_swap_weak:
2859 case LS_cmp_exchange: {
2860 const bool two_slot_type = type2size[type] == 2;
2861 receiver = argument(0); // type: oop
2862 base = argument(1); // type: oop
2863 offset = argument(2); // type: long
2864 oldval = argument(4); // type: oop, int, or long
2865 newval = argument(two_slot_type ? 6 : 5); // type: oop, int, or long
2866 break;
2867 }
2868 case LS_get_add:
2869 case LS_get_set: {
2870 receiver = argument(0); // type: oop
2871 base = argument(1); // type: oop
2872 offset = argument(2); // type: long
2873 oldval = NULL;
2874 newval = argument(4); // type: oop, int, or long
2875 break;
2876 }
2877 default:
2878 ShouldNotReachHere();
2879 }
2880
2881 // Build field offset expression.
2882 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2883 // to be plain byte offsets, which are also the same as those accepted
2884 // by oopDesc::field_addr.
2885 assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
2886 // 32-bit machines ignore the high half of long offsets
2887 offset = ConvL2X(offset);
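  // The atomic operations below both read and write the location, so the
  // address is resolved for read and write access.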
2888 Node* adr = make_unsafe_address(base, offset, ACCESS_WRITE | ACCESS_READ, type, false);
2889 const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2890
2891 Compile::AliasType* alias_type = C->alias_type(adr_type);
2892 BasicType bt = alias_type->basic_type();
2893 if (bt != T_ILLEGAL &&
2894 ((bt == T_OBJECT || bt == T_ARRAY) != (type == T_OBJECT))) {
2895 // Don't intrinsify mismatched object accesses.
2896 return false;
2897 }
2898
2899 // For CAS, unlike inline_unsafe_access, there seems no point in
2900 // trying to refine types. Just use the coarse types here.
2901 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2902 const Type *value_type = Type::get_const_basic_type(type);
2903
2904 switch (kind) {
2905 case LS_get_set:
2906 case LS_cmp_exchange: {
2907 if (type == T_OBJECT) {
2908 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2909 if (tjp != NULL) {
2910 value_type = tjp;
2911 }
2912 }
2913 break;
2914 }
2915 case LS_cmp_swap:
2916 case LS_cmp_swap_weak:
2917 case LS_get_add:
2918 break;
2919 default:
2920 ShouldNotReachHere();
2921 }
2922
2923 // Null check receiver.
2924 receiver = null_check(receiver);
2925 if (stopped()) {
2926 return true;
2927 }
2928
2929 int alias_idx = C->get_alias_index(adr_type);
2930
2931 if (type == T_OBJECT || type == T_ARRAY) {
2932 decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF;
2933
    // Transformation of a value which could be a NULL pointer (CastPP #NULL)
2935 // could be delayed during Parse (for example, in adjust_map_after_if()).
2936 // Execute transformation here to avoid barrier generation in such case.
2937 if (_gvn.type(newval) == TypePtr::NULL_PTR)
2938 newval = _gvn.makecon(TypePtr::NULL_PTR);
2939
2940 if (oldval != NULL && _gvn.type(oldval) == TypePtr::NULL_PTR) {
2941 // Refine the value to a null constant, when it is known to be null
2942 oldval = _gvn.makecon(TypePtr::NULL_PTR);
2943 }
2944 }
2945
2946 Node* result = NULL;
2947 switch (kind) {
2948 case LS_cmp_exchange: {
2949 result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx,
2950 oldval, newval, value_type, type, decorators);
2951 break;
2952 }
2953 case LS_cmp_swap_weak:
2954 decorators |= C2_WEAK_CMPXCHG;
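      // fall through: weak CAS uses the same boolean cmpxchg access as LS_cmp_swap,
      // just with the C2_WEAK_CMPXCHG decorator set.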
2955 case LS_cmp_swap: {
2956 result = access_atomic_cmpxchg_bool_at(base, adr, adr_type, alias_idx,
2957 oldval, newval, value_type, type, decorators);
2958 break;
2959 }
2960 case LS_get_set: {
2961 result = access_atomic_xchg_at(base, adr, adr_type, alias_idx,
2962 newval, value_type, type, decorators);
2963 break;
2964 }
2965 case LS_get_add: {
2966 result = access_atomic_add_at(base, adr, adr_type, alias_idx,
2967 newval, value_type, type, decorators);
2968 break;
2969 }
2970 default:
2971 ShouldNotReachHere();
2972 }
2973
2974 assert(type2size[result->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
2975 set_result(result);
2976 return true;
2977 }
2978
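//----------------------------inline_unsafe_fence----------------------------
// Handles Unsafe.loadFence/storeFence/fullFence by emitting a CPUOrder barrier
// followed by the matching memory barrier node.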
2979 bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
2980 // Regardless of form, don't allow previous ld/st to move down,
2981 // then issue acquire, release, or volatile mem_bar.
2982 insert_mem_bar(Op_MemBarCPUOrder);
2983 switch(id) {
2984 case vmIntrinsics::_loadFence:
2985 insert_mem_bar(Op_LoadFence);
2986 return true;
2987 case vmIntrinsics::_storeFence:
2988 insert_mem_bar(Op_StoreFence);
2989 return true;
2990 case vmIntrinsics::_fullFence:
2991 insert_mem_bar(Op_MemBarVolatile);
2992 return true;
2993 default:
2994 fatal_unexpected_iid(id);
2995 return false;
2996 }
2997 }
2998
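//----------------------------inline_onspinwait------------------------------
// Thread.onSpinWait(): emit an OnSpinWait node as a spin-loop hint for the backend.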
2999 bool LibraryCallKit::inline_onspinwait() {
3000 insert_mem_bar(Op_OnSpinWait);
3001 return true;
3002 }
3003
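//------------------------klass_needs_init_guard-----------------------------
// Returns false only when the klass is a compile-time constant that is already
// initialized; otherwise inline_unsafe_allocate must emit an init-state check.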
3004 bool LibraryCallKit::klass_needs_init_guard(Node* kls) {
3005 if (!kls->is_Con()) {
3006 return true;
3007 }
3008 const TypeKlassPtr* klsptr = kls->bottom_type()->isa_klassptr();
3009 if (klsptr == NULL) {
3010 return true;
3011 }
3012 ciInstanceKlass* ik = klsptr->klass()->as_instance_klass();
3013 // don't need a guard for a klass that is already initialized
3014 return !ik->is_initialized();
3015 }
3016
3017 //----------------------------inline_unsafe_allocate---------------------------
3018 // public native Object Unsafe.allocateInstance(Class<?> cls);
3019 bool LibraryCallKit::inline_unsafe_allocate() {
3020 if (callee()->is_static()) return false; // caller must have the capability!
3021
3022 null_check_receiver(); // null-check, then ignore
3023 Node* cls = null_check(argument(1));
3024 if (stopped()) return true;
3025
3026 Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
3027 kls = null_check(kls);
3028 if (stopped()) return true; // argument was like int.class
3029
3030 Node* test = NULL;
3031 if (LibraryCallKit::klass_needs_init_guard(kls)) {
3032 // Note: The argument might still be an illegal value like
3033 // Serializable.class or Object[].class. The runtime will handle it.
3034 // But we must make an explicit check for initialization.
3035 Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
3036 // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
3037 // can generate code to load it as unsigned byte.
3038 Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
3039 Node* bits = intcon(InstanceKlass::fully_initialized);
3040 test = _gvn.transform(new SubINode(inst, bits));
3041 // The 'test' is non-zero if we need to take a slow path.
3042 }
3043
3044 Node* obj = new_instance(kls, test);
3045 set_result(obj);
3046 return true;
3047 }
3048
3049 //------------------------inline_native_time_funcs--------------
3050 // inline code for System.currentTimeMillis() and System.nanoTime()
3051 // these have the same type and signature
3052 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
3053 const TypeFunc* tf = OptoRuntime::void_long_Type();
3054 const TypePtr* no_memory_effects = NULL;
3055 Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
3056 Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0));
3057 #ifdef ASSERT
3058 Node* value_top = _gvn.transform(new ProjNode(time, TypeFunc::Parms+1));
3059 assert(value_top == top(), "second value must be top");
3060 #endif
3061 set_result(value);
3062 return true;
3063 }
3064
3065 #ifdef JFR_HAVE_INTRINSICS
3066
3067 /*
3068 * oop -> myklass
3069 * myklass->trace_id |= USED
3070 * return myklass->trace_id & ~0x3
3071 */
3072 bool LibraryCallKit::inline_native_classID() {
3073 Node* cls = null_check(argument(0), T_OBJECT);
3074 Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
3075 kls = null_check(kls, T_OBJECT);
3076
3077 ByteSize offset = KLASS_TRACE_ID_OFFSET;
3078 Node* insp = basic_plus_adr(kls, in_bytes(offset));
3079 Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG, MemNode::unordered);
3080
  Node* clsused = longcon(0x01L); // set the class bit
3082 Node* orl = _gvn.transform(new OrLNode(tvalue, clsused));
3083 const TypePtr *adr_type = _gvn.type(insp)->isa_ptr();
3084 store_to_memory(control(), insp, orl, T_LONG, adr_type, MemNode::unordered);
3085
3086 #ifdef TRACE_ID_META_BITS
3087 Node* mbits = longcon(~TRACE_ID_META_BITS);
3088 tvalue = _gvn.transform(new AndLNode(tvalue, mbits));
3089 #endif
3090 #ifdef TRACE_ID_SHIFT
3091 Node* cbits = intcon(TRACE_ID_SHIFT);
3092 tvalue = _gvn.transform(new URShiftLNode(tvalue, cbits));
3093 #endif
3094
3095 set_result(tvalue);
3096 return true;
3097
3098 }
3099
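// Loads the thread-local JFR event writer object. If the slot is still null the
// intrinsic returns null; otherwise the oop is loaded via a control-dependent
// native access.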
3100 bool LibraryCallKit::inline_native_getEventWriter() {
3101 Node* tls_ptr = _gvn.transform(new ThreadLocalNode());
3102
3103 Node* jobj_ptr = basic_plus_adr(top(), tls_ptr,
3104 in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR));
3105
3106 Node* jobj = make_load(control(), jobj_ptr, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
3107
3108 Node* jobj_cmp_null = _gvn.transform( new CmpPNode(jobj, null()) );
3109 Node* test_jobj_eq_null = _gvn.transform( new BoolNode(jobj_cmp_null, BoolTest::eq) );
3110
3111 IfNode* iff_jobj_null =
3112 create_and_map_if(control(), test_jobj_eq_null, PROB_MIN, COUNT_UNKNOWN);
3113
3114 enum { _normal_path = 1,
3115 _null_path = 2,
3116 PATH_LIMIT };
3117
3118 RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
3119 PhiNode* result_val = new PhiNode(result_rgn, TypeInstPtr::BOTTOM);
3120
3121 Node* jobj_is_null = _gvn.transform(new IfTrueNode(iff_jobj_null));
3122 result_rgn->init_req(_null_path, jobj_is_null);
3123 result_val->init_req(_null_path, null());
3124
3125 Node* jobj_is_not_null = _gvn.transform(new IfFalseNode(iff_jobj_null));
3126 set_control(jobj_is_not_null);
3127 Node* res = access_load(jobj, TypeInstPtr::NOTNULL, T_OBJECT,
3128 IN_NATIVE | C2_CONTROL_DEPENDENT_LOAD);
3129 result_rgn->init_req(_normal_path, control());
3130 result_val->init_req(_normal_path, res);
3131
3132 set_result(result_rgn, result_val);
3133
3134 return true;
3135 }
3136
3137 #endif // JFR_HAVE_INTRINSICS
3138
3139 //------------------------inline_native_currentThread------------------
3140 bool LibraryCallKit::inline_native_currentThread() {
3141 Node* junk = NULL;
3142 set_result(generate_current_thread(junk));
3143 return true;
3144 }
3145
3146 //------------------------inline_native_isInterrupted------------------
3147 // private native boolean java.lang.Thread.isInterrupted(boolean ClearInterrupted);
3148 bool LibraryCallKit::inline_native_isInterrupted() {
3149 // Add a fast path to t.isInterrupted(clear_int):
3150 // (t == Thread.current() &&
3151 // (!TLS._osthread._interrupted || WINDOWS_ONLY(false) NOT_WINDOWS(!clear_int)))
3152 // ? TLS._osthread._interrupted : /*slow path:*/ t.isInterrupted(clear_int)
3153 // So, in the common case that the interrupt bit is false,
3154 // we avoid making a call into the VM. Even if the interrupt bit
3155 // is true, if the clear_int argument is false, we avoid the VM call.
3156 // However, if the receiver is not currentThread, we must call the VM,
3157 // because there must be some locking done around the operation.
3158
3159 // We only go to the fast case code if we pass two guards.
3160 // Paths which do not pass are accumulated in the slow_region.
3161
3162 enum {
3163 no_int_result_path = 1, // t == Thread.current() && !TLS._osthread._interrupted
3164 no_clear_result_path = 2, // t == Thread.current() && TLS._osthread._interrupted && !clear_int
3165 slow_result_path = 3, // slow path: t.isInterrupted(clear_int)
3166 PATH_LIMIT
3167 };
3168
  // Ensure that it's not possible to move the load of the TLS._osthread._interrupted
  // flag out of the function.
3171 insert_mem_bar(Op_MemBarCPUOrder);
3172
3173 RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
3174 PhiNode* result_val = new PhiNode(result_rgn, TypeInt::BOOL);
3175
3176 RegionNode* slow_region = new RegionNode(1);
3177 record_for_igvn(slow_region);
3178
3179 // (a) Receiving thread must be the current thread.
3180 Node* rec_thr = argument(0);
3181 Node* tls_ptr = NULL;
3182 Node* cur_thr = generate_current_thread(tls_ptr);
3183
3184 // Resolve oops to stable for CmpP below.
3185 cur_thr = access_resolve(cur_thr, 0);
3186 rec_thr = access_resolve(rec_thr, 0);
3187
3188 Node* cmp_thr = _gvn.transform(new CmpPNode(cur_thr, rec_thr));
3189 Node* bol_thr = _gvn.transform(new BoolNode(cmp_thr, BoolTest::ne));
3190
3191 generate_slow_guard(bol_thr, slow_region);
3192
3193 // (b) Interrupt bit on TLS must be false.
3194 Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
3195 Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3196 p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::interrupted_offset()));
3197
3198 // Set the control input on the field _interrupted read to prevent it floating up.
3199 Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT, MemNode::unordered);
3200 Node* cmp_bit = _gvn.transform(new CmpINode(int_bit, intcon(0)));
3201 Node* bol_bit = _gvn.transform(new BoolNode(cmp_bit, BoolTest::ne));
3202
3203 IfNode* iff_bit = create_and_map_if(control(), bol_bit, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
3204
3205 // First fast path: if (!TLS._interrupted) return false;
3206 Node* false_bit = _gvn.transform(new IfFalseNode(iff_bit));
3207 result_rgn->init_req(no_int_result_path, false_bit);
3208 result_val->init_req(no_int_result_path, intcon(0));
3209
3210 // drop through to next case
3211 set_control( _gvn.transform(new IfTrueNode(iff_bit)));
3212
3213 #ifndef _WINDOWS
3214 // (c) Or, if interrupt bit is set and clear_int is false, use 2nd fast path.
3215 Node* clr_arg = argument(1);
3216 Node* cmp_arg = _gvn.transform(new CmpINode(clr_arg, intcon(0)));
3217 Node* bol_arg = _gvn.transform(new BoolNode(cmp_arg, BoolTest::ne));
3218 IfNode* iff_arg = create_and_map_if(control(), bol_arg, PROB_FAIR, COUNT_UNKNOWN);
3219
3220 // Second fast path: ... else if (!clear_int) return true;
3221 Node* false_arg = _gvn.transform(new IfFalseNode(iff_arg));
3222 result_rgn->init_req(no_clear_result_path, false_arg);
3223 result_val->init_req(no_clear_result_path, intcon(1));
3224
3225 // drop through to next case
3226 set_control( _gvn.transform(new IfTrueNode(iff_arg)));
3227 #else
3228 // To return true on Windows you must read the _interrupted field
  // and check the event state, i.e., take the slow path.
3230 #endif // _WINDOWS
3231
3232 // (d) Otherwise, go to the slow path.
3233 slow_region->add_req(control());
3234 set_control( _gvn.transform(slow_region));
3235
3236 if (stopped()) {
3237 // There is no slow path.
3238 result_rgn->init_req(slow_result_path, top());
3239 result_val->init_req(slow_result_path, top());
3240 } else {
3241 // non-virtual because it is a private non-static
3242 CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_isInterrupted);
3243
3244 Node* slow_val = set_results_for_java_call(slow_call);
3245 // this->control() comes from set_results_for_java_call
3246
3247 Node* fast_io = slow_call->in(TypeFunc::I_O);
3248 Node* fast_mem = slow_call->in(TypeFunc::Memory);
3249
    // These two phis are pre-filled with copies of the fast IO and Memory
3251 PhiNode* result_mem = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);
3252 PhiNode* result_io = PhiNode::make(result_rgn, fast_io, Type::ABIO);
3253
3254 result_rgn->init_req(slow_result_path, control());
3255 result_io ->init_req(slow_result_path, i_o());
3256 result_mem->init_req(slow_result_path, reset_memory());
3257 result_val->init_req(slow_result_path, slow_val);
3258
3259 set_all_memory(_gvn.transform(result_mem));
3260 set_i_o( _gvn.transform(result_io));
3261 }
3262
3263 C->set_has_split_ifs(true); // Has chance for split-if optimization
3264 set_result(result_rgn, result_val);
3265 return true;
3266 }
3267
3268 //-----------------------load_klass_from_mirror_common-------------------------
3269 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3270 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3271 // and branch to the given path on the region.
3272 // If never_see_null, take an uncommon trap on null, so we can optimistically
3273 // compile for the non-null case.
3274 // If the region is NULL, force never_see_null = true.
3275 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3276 bool never_see_null,
3277 RegionNode* region,
3278 int null_path,
3279 int offset) {
3280 if (region == NULL) never_see_null = true;
3281 Node* p = basic_plus_adr(mirror, offset);
3282 const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
3283 Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3284 Node* null_ctl = top();
3285 kls = null_check_oop(kls, &null_ctl, never_see_null);
3286 if (region != NULL) {
    // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).
3288 region->init_req(null_path, null_ctl);
3289 } else {
3290 assert(null_ctl == top(), "no loose ends");
3291 }
3292 return kls;
3293 }
3294
3295 //--------------------(inline_native_Class_query helpers)---------------------
3296 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE_FAST, JVM_ACC_HAS_FINALIZER.
3297 // Fall through if (mods & mask) == bits, take the guard otherwise.
3298 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3299 // Branch around if the given klass has the given modifier bit set.
3300 // Like generate_guard, adds a new path onto the region.
3301 Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3302 Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT, MemNode::unordered);
3303 Node* mask = intcon(modifier_mask);
3304 Node* bits = intcon(modifier_bits);
3305 Node* mbit = _gvn.transform(new AndINode(mods, mask));
3306 Node* cmp = _gvn.transform(new CmpINode(mbit, bits));
3307 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3308 return generate_fair_guard(bol, region);
3309 }
3310 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3311 return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3312 }
3313
3314 Node* LibraryCallKit::generate_value_guard(Node* kls, RegionNode* region) {
3315 return generate_access_flags_guard(kls, JVM_ACC_VALUE, 0, region);
3316 }
3317
3318 //-------------------------inline_native_Class_query-------------------
3319 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3320 const Type* return_type = TypeInt::BOOL;
3321 Node* prim_return_value = top(); // what happens if it's a primitive class?
3322 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3323 bool expect_prim = false; // most of these guys expect to work on refs
3324
3325 enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3326
3327 Node* mirror = argument(0);
3328 Node* obj = top();
3329
3330 switch (id) {
3331 case vmIntrinsics::_isInstance:
3332 // nothing is an instance of a primitive type
3333 prim_return_value = intcon(0);
3334 obj = argument(1);
3335 break;
3336 case vmIntrinsics::_getModifiers:
3337 prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
3338 assert(is_power_of_2((int)JVM_ACC_WRITTEN_FLAGS+1), "change next line");
3339 return_type = TypeInt::make(0, JVM_ACC_WRITTEN_FLAGS, Type::WidenMin);
3340 break;
3341 case vmIntrinsics::_isInterface:
3342 prim_return_value = intcon(0);
3343 break;
3344 case vmIntrinsics::_isArray:
3345 prim_return_value = intcon(0);
3346 expect_prim = true; // cf. ObjectStreamClass.getClassSignature
3347 break;
3348 case vmIntrinsics::_isPrimitive:
3349 prim_return_value = intcon(1);
3350 expect_prim = true; // obviously
3351 break;
3352 case vmIntrinsics::_getSuperclass:
3353 prim_return_value = null();
3354 return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);
3355 break;
3356 case vmIntrinsics::_getClassAccessFlags:
3357 prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
3358 return_type = TypeInt::INT; // not bool! 6297094
3359 break;
3360 default:
3361 fatal_unexpected_iid(id);
3362 break;
3363 }
3364
3365 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3366 if (mirror_con == NULL) return false; // cannot happen?
3367
3368 #ifndef PRODUCT
3369 if (C->print_intrinsics() || C->print_inlining()) {
3370 ciType* k = mirror_con->java_mirror_type();
3371 if (k) {
3372 tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
3373 k->print_name();
3374 tty->cr();
3375 }
3376 }
3377 #endif
3378
3379 // Null-check the mirror, and the mirror's klass ptr (in case it is a primitive).
3380 RegionNode* region = new RegionNode(PATH_LIMIT);
3381 record_for_igvn(region);
3382 PhiNode* phi = new PhiNode(region, return_type);
3383
  // The mirror will never be null for Reflection.getClassAccessFlags; however,
  // it may be null for Class.isInstance or Class.getModifiers. Throw an NPE
  // if it is. See bug 4774291.
3387
3388 // For Reflection.getClassAccessFlags(), the null check occurs in
3389 // the wrong place; see inline_unsafe_access(), above, for a similar
3390 // situation.
3391 mirror = null_check(mirror);
3392 // If mirror or obj is dead, only null-path is taken.
3393 if (stopped()) return true;
3394
3395 if (expect_prim) never_see_null = false; // expect nulls (meaning prims)
3396
3397 // Now load the mirror's klass metaobject, and null-check it.
3398 // Side-effects region with the control path if the klass is null.
3399 Node* kls = load_klass_from_mirror(mirror, never_see_null, region, _prim_path);
3400 // If kls is null, we have a primitive mirror.
3401 phi->init_req(_prim_path, prim_return_value);
3402 if (stopped()) { set_result(region, phi); return true; }
3403 bool safe_for_replace = (region->in(_prim_path) == top());
3404
3405 Node* p; // handy temp
3406 Node* null_ctl;
3407
3408 // Now that we have the non-null klass, we can perform the real query.
3409 // For constant classes, the query will constant-fold in LoadNode::Value.
3410 Node* query_value = top();
3411 switch (id) {
3412 case vmIntrinsics::_isInstance:
3413 // nothing is an instance of a primitive type
3414 query_value = gen_instanceof(obj, kls, safe_for_replace);
3415 break;
3416
3417 case vmIntrinsics::_getModifiers:
3418 p = basic_plus_adr(kls, in_bytes(Klass::modifier_flags_offset()));
3419 query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
3420 break;
3421
3422 case vmIntrinsics::_isInterface:
3423 // (To verify this code sequence, check the asserts in JVM_IsInterface.)
3424 if (generate_interface_guard(kls, region) != NULL)
3425 // A guard was added. If the guard is taken, it was an interface.
3426 phi->add_req(intcon(1));
3427 // If we fall through, it's a plain class.
3428 query_value = intcon(0);
3429 break;
3430
3431 case vmIntrinsics::_isArray:
3432 // (To verify this code sequence, check the asserts in JVM_IsArrayClass.)
3433 if (generate_array_guard(kls, region) != NULL)
3434 // A guard was added. If the guard is taken, it was an array.
3435 phi->add_req(intcon(1));
3436 // If we fall through, it's a plain class.
3437 query_value = intcon(0);
3438 break;
3439
3440 case vmIntrinsics::_isPrimitive:
3441 query_value = intcon(0); // "normal" path produces false
3442 break;
3443
3444 case vmIntrinsics::_getSuperclass:
3445 // The rules here are somewhat unfortunate, but we can still do better
3446 // with random logic than with a JNI call.
3447 // Interfaces store null or Object as _super, but must report null.
3448 // Arrays store an intermediate super as _super, but must report Object.
3449 // Other types can report the actual _super.
3450 // (To verify this code sequence, check the asserts in JVM_IsInterface.)
3451 if (generate_interface_guard(kls, region) != NULL)
3452 // A guard was added. If the guard is taken, it was an interface.
3453 phi->add_req(null());
3454 if (generate_array_guard(kls, region) != NULL)
3455 // A guard was added. If the guard is taken, it was an array.
3456 phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
3457 // If we fall through, it's a plain class. Get its _super.
3458 p = basic_plus_adr(kls, in_bytes(Klass::super_offset()));
3459 kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));
3460 null_ctl = top();
3461 kls = null_check_oop(kls, &null_ctl);
3462 if (null_ctl != top()) {
3463 // If the guard is taken, Object.superClass is null (both klass and mirror).
3464 region->add_req(null_ctl);
3465 phi ->add_req(null());
3466 }
3467 if (!stopped()) {
3468 query_value = load_mirror_from_klass(kls);
3469 }
3470 break;
3471
3472 case vmIntrinsics::_getClassAccessFlags:
3473 p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3474 query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
3475 break;
3476
3477 default:
3478 fatal_unexpected_iid(id);
3479 break;
3480 }
3481
3482 // Fall-through is the normal case of a query to a real class.
3483 phi->init_req(1, query_value);
3484 region->init_req(1, control());
3485
3486 C->set_has_split_ifs(true); // Has chance for split-if optimization
3487 set_result(region, phi);
3488 return true;
3489 }
3490
3491 //-------------------------inline_Class_cast-------------------
3492 bool LibraryCallKit::inline_Class_cast() {
3493 Node* mirror = argument(0); // Class
3494 Node* obj = argument(1);
3495 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3496 if (mirror_con == NULL) {
3497 return false; // dead path (mirror->is_top()).
3498 }
3499 if (obj == NULL || obj->is_top()) {
3500 return false; // dead path
3501 }
3502
3503 ciKlass* obj_klass = NULL;
3504 if (obj->is_ValueType()) {
3505 const TypeValueType* tvt = _gvn.type(obj)->is_valuetype();
3506 obj_klass = tvt->value_klass();
3507 } else {
3508 const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
3509 if (tp != NULL) {
3510 obj_klass = tp->klass();
3511 }
3512 }
3513
3514 // First, see if Class.cast() can be folded statically.
3515 // java_mirror_type() returns non-null for compile-time Class constants.
3516 ciType* tm = mirror_con->java_mirror_type();
3517 if (tm != NULL && tm->is_klass() &&
3518 obj_klass != NULL) {
3519 if (!obj_klass->is_loaded()) {
3520 // Don't use intrinsic when class is not loaded.
3521 return false;
3522 } else {
3523 int static_res = C->static_subtype_check(tm->as_klass(), obj_klass);
3524 if (static_res == Compile::SSC_always_true) {
3525 // isInstance() is true - fold the code.
3526 set_result(obj);
3527 return true;
3528 } else if (static_res == Compile::SSC_always_false) {
3529 // Don't use intrinsic, have to throw ClassCastException.
3530 // If the reference is null, the non-intrinsic bytecode will
3531 // be optimized appropriately.
3532 return false;
3533 }
3534 }
3535 }
3536
3537 // Bailout intrinsic and do normal inlining if exception path is frequent.
3538 if (too_many_traps(Deoptimization::Reason_intrinsic)) {
3539 return false;
3540 }
3541
3542 // Generate dynamic checks.
3543 // Class.cast() is java implementation of _checkcast bytecode.
3544 // Do checkcast (Parse::do_checkcast()) optimizations here.
3545
3546 mirror = null_check(mirror);
3547 // If mirror is dead, only null-path is taken.
3548 if (stopped()) {
3549 return true;
3550 }
3551
3552 // Not-subtype or the mirror's klass ptr is NULL (in case it is a primitive).
3553 enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT };
3554 RegionNode* region = new RegionNode(PATH_LIMIT);
3555 record_for_igvn(region);
3556
3557 // Now load the mirror's klass metaobject, and null-check it.
3558 // If kls is null, we have a primitive mirror and
3559 // nothing is an instance of a primitive type.
3560 Node* kls = load_klass_from_mirror(mirror, false, region, _prim_path);
3561
3562 Node* res = top();
3563 if (!stopped()) {
3564 Node* bad_type_ctrl = top();
3565 // Do checkcast optimizations.
3566 res = gen_checkcast(obj, kls, &bad_type_ctrl);
3567 region->init_req(_bad_type_path, bad_type_ctrl);
3568 }
3569 if (region->in(_prim_path) != top() ||
3570 region->in(_bad_type_path) != top()) {
3571 // Let Interpreter throw ClassCastException.
3572 PreserveJVMState pjvms(this);
3573 set_control(_gvn.transform(region));
3574 uncommon_trap(Deoptimization::Reason_intrinsic,
3575 Deoptimization::Action_maybe_recompile);
3576 }
3577 if (!stopped()) {
3578 set_result(res);
3579 }
3580 return true;
3581 }
3582
3583
3584 //--------------------------inline_native_subtype_check------------------------
3585 // This intrinsic takes the JNI calls out of the heart of
3586 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
3587 bool LibraryCallKit::inline_native_subtype_check() {
3588 // Pull both arguments off the stack.
3589 Node* args[2]; // two java.lang.Class mirrors: superc, subc
3590 args[0] = argument(0);
3591 args[1] = argument(1);
3592 Node* klasses[2]; // corresponding Klasses: superk, subk
3593 klasses[0] = klasses[1] = top();
3594
3595 enum {
3596 // A full decision tree on {superc is prim, subc is prim}:
3597 _prim_0_path = 1, // {P,N} => false
3598 // {P,P} & superc!=subc => false
3599 _prim_same_path, // {P,P} & superc==subc => true
3600 _prim_1_path, // {N,P} => false
3601 _ref_subtype_path, // {N,N} & subtype check wins => true
3602 _both_ref_path, // {N,N} & subtype check loses => false
3603 PATH_LIMIT
3604 };
3605
3606 RegionNode* region = new RegionNode(PATH_LIMIT);
3607 Node* phi = new PhiNode(region, TypeInt::BOOL);
3608 record_for_igvn(region);
3609
3610 const TypePtr* adr_type = TypeRawPtr::BOTTOM; // memory type of loads
3611 const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
3612 int class_klass_offset = java_lang_Class::klass_offset_in_bytes();
3613
3614 // First null-check both mirrors and load each mirror's klass metaobject.
3615 int which_arg;
3616 for (which_arg = 0; which_arg <= 1; which_arg++) {
3617 Node* arg = args[which_arg];
3618 arg = null_check(arg);
3619 if (stopped()) break;
3620 args[which_arg] = arg;
3621
3622 Node* p = basic_plus_adr(arg, class_klass_offset);
3623 Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type);
3624 klasses[which_arg] = _gvn.transform(kls);
3625 }
3626
3627 // Resolve oops to stable for CmpP below.
3628 args[0] = access_resolve(args[0], 0);
3629 args[1] = access_resolve(args[1], 0);
3630
3631 // Having loaded both klasses, test each for null.
3632 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3633 for (which_arg = 0; which_arg <= 1; which_arg++) {
3634 Node* kls = klasses[which_arg];
3635 Node* null_ctl = top();
3636 kls = null_check_oop(kls, &null_ctl, never_see_null);
3637 int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
3638 region->init_req(prim_path, null_ctl);
3639 if (stopped()) break;
3640 klasses[which_arg] = kls;
3641 }
3642
3643 if (!stopped()) {
3644 // now we have two reference types, in klasses[0..1]
3645 Node* subk = klasses[1]; // the argument to isAssignableFrom
3646 Node* superk = klasses[0]; // the receiver
3647 region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
3648 // now we have a successful reference subtype check
3649 region->set_req(_ref_subtype_path, control());
3650 }
3651
3652 // If both operands are primitive (both klasses null), then
3653 // we must return true when they are identical primitives.
3654 // It is convenient to test this after the first null klass check.
3655 set_control(region->in(_prim_0_path)); // go back to first null check
3656 if (!stopped()) {
3657 // Since superc is primitive, make a guard for the superc==subc case.
3658 Node* cmp_eq = _gvn.transform(new CmpPNode(args[0], args[1]));
3659 Node* bol_eq = _gvn.transform(new BoolNode(cmp_eq, BoolTest::eq));
3660 generate_guard(bol_eq, region, PROB_FAIR);
3661 if (region->req() == PATH_LIMIT+1) {
3662 // A guard was added. If the added guard is taken, superc==subc.
3663 region->swap_edges(PATH_LIMIT, _prim_same_path);
3664 region->del_req(PATH_LIMIT);
3665 }
3666 region->set_req(_prim_0_path, control()); // Not equal after all.
3667 }
3668
3669 // these are the only paths that produce 'true':
3670 phi->set_req(_prim_same_path, intcon(1));
3671 phi->set_req(_ref_subtype_path, intcon(1));
3672
3673 // pull together the cases:
3674 assert(region->req() == PATH_LIMIT, "sane region");
3675 for (uint i = 1; i < region->req(); i++) {
3676 Node* ctl = region->in(i);
3677 if (ctl == NULL || ctl == top()) {
3678 region->set_req(i, top());
3679 phi ->set_req(i, top());
3680 } else if (phi->in(i) == NULL) {
3681 phi->set_req(i, intcon(0)); // all other paths produce 'false'
3682 }
3683 }
3684
3685 set_control(_gvn.transform(region));
3686 set_result(_gvn.transform(phi));
3687 return true;
3688 }
3689
3690 //---------------------generate_array_guard_common------------------------
3691 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind) {
3692
3693 if (stopped()) {
3694 return NULL;
3695 }
3696
3697 // Like generate_guard, adds a new path onto the region.
3698 jint layout_con = 0;
3699 Node* layout_val = get_layout_helper(kls, layout_con);
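  // A NULL layout_val means the layout helper is a compile-time constant
  // (layout_con), so the array question can be answered statically below.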
3700 if (layout_val == NULL) {
    bool query = false;
3702 switch(kind) {
3703 case ObjectArray: query = Klass::layout_helper_is_objArray(layout_con); break;
3704 case NonObjectArray: query = !Klass::layout_helper_is_objArray(layout_con); break;
3705 case TypeArray: query = Klass::layout_helper_is_typeArray(layout_con); break;
3706 case ValueArray: query = Klass::layout_helper_is_valueArray(layout_con); break;
3707 case AnyArray: query = Klass::layout_helper_is_array(layout_con); break;
3708 case NonArray: query = !Klass::layout_helper_is_array(layout_con); break;
3709 default:
3710 ShouldNotReachHere();
3711 }
3712 if (!query) {
3713 return NULL; // never a branch
3714 } else { // always a branch
3715 Node* always_branch = control();
3716 if (region != NULL)
3717 region->add_req(always_branch);
3718 set_control(top());
3719 return always_branch;
3720 }
3721 }
3722 unsigned int value = 0;
3723 BoolTest::mask btest = BoolTest::illegal;
3724 switch(kind) {
3725 case ObjectArray:
3726 case NonObjectArray: {
3727 value = Klass::_lh_array_tag_obj_value;
3728 layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
3729 btest = kind == ObjectArray ? BoolTest::eq : BoolTest::ne;
3730 break;
3731 }
3732 case TypeArray: {
3733 value = Klass::_lh_array_tag_type_value;
3734 layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
3735 btest = BoolTest::eq;
3736 break;
3737 }
3738 case ValueArray: {
3739 value = Klass::_lh_array_tag_vt_value;
3740 layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
3741 btest = BoolTest::eq;
3742 break;
3743 }
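    // Arrays encode a layout helper below _lh_neutral_value, so AnyArray tests
    // 'lt' and NonArray tests 'gt' against the neutral value.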
3744 case AnyArray: value = Klass::_lh_neutral_value; btest = BoolTest::lt; break;
3745 case NonArray: value = Klass::_lh_neutral_value; btest = BoolTest::gt; break;
3746 default:
3747 ShouldNotReachHere();
3748 }
3749 // Now test the correct condition.
3750 jint nval = (jint)value;
3751 Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
3752 Node* bol = _gvn.transform(new BoolNode(cmp, btest));
3753 return generate_fair_guard(bol, region);
3754 }
3755
3756
3757 //-----------------------inline_native_newArray--------------------------
3758 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
3759 // private native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
3760 bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
3761 Node* mirror;
3762 Node* count_val;
3763 if (uninitialized) {
3764 mirror = argument(1);
3765 count_val = argument(2);
3766 } else {
3767 mirror = argument(0);
3768 count_val = argument(1);
3769 }
3770
3771 mirror = null_check(mirror);
3772 // If mirror or obj is dead, only null-path is taken.
3773 if (stopped()) return true;
3774
3775 enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
3776 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
3777 PhiNode* result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
3778 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
3779 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
3780
3781 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3782 Node* klass_node = load_array_klass_from_mirror(mirror, never_see_null,
3783 result_reg, _slow_path);
3784 Node* normal_ctl = control();
3785 Node* no_array_ctl = result_reg->in(_slow_path);
3786
3787 // Generate code for the slow case. We make a call to newArray().
3788 set_control(no_array_ctl);
3789 if (!stopped()) {
3790 // Either the input type is void.class, or else the
3791 // array klass has not yet been cached. Either the
3792 // ensuing call will throw an exception, or else it
3793 // will cache the array klass for next time.
3794 PreserveJVMState pjvms(this);
3795 CallJavaNode* slow_call = generate_method_call_static(vmIntrinsics::_newArray);
3796 Node* slow_result = set_results_for_java_call(slow_call);
3797 // this->control() comes from set_results_for_java_call
3798 result_reg->set_req(_slow_path, control());
3799 result_val->set_req(_slow_path, slow_result);
3800 result_io ->set_req(_slow_path, i_o());
3801 result_mem->set_req(_slow_path, reset_memory());
3802 }
3803
3804 set_control(normal_ctl);
3805 if (!stopped()) {
3806 // Normal case: The array type has been cached in the java.lang.Class.
3807 // The following call works fine even if the array type is polymorphic.
3808 // It could be a dynamic mix of int[], boolean[], Object[], etc.
3809 Node* obj = new_array(klass_node, count_val, 0); // no arguments to push
3810 result_reg->init_req(_normal_path, control());
3811 result_val->init_req(_normal_path, obj);
3812 result_io ->init_req(_normal_path, i_o());
3813 result_mem->init_req(_normal_path, reset_memory());
3814
3815 if (uninitialized) {
3816 // Mark the allocation so that zeroing is skipped
3817 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(obj, &_gvn);
3818 alloc->maybe_set_complete(&_gvn);
3819 }
3820 }
3821
3822 // Return the combined state.
3823 set_i_o( _gvn.transform(result_io) );
3824 set_all_memory( _gvn.transform(result_mem));
3825
3826 C->set_has_split_ifs(true); // Has chance for split-if optimization
3827 set_result(result_reg, result_val);
3828 return true;
3829 }
3830
3831 //----------------------inline_native_getLength--------------------------
3832 // public static native int java.lang.reflect.Array.getLength(Object array);
3833 bool LibraryCallKit::inline_native_getLength() {
3834 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false;
3835
3836 Node* array = null_check(argument(0));
3837 // If array is dead, only null-path is taken.
3838 if (stopped()) return true;
3839
3840 // Deoptimize if it is a non-array.
3841 Node* non_array = generate_non_array_guard(load_object_klass(array), NULL);
3842
3843 if (non_array != NULL) {
3844 PreserveJVMState pjvms(this);
3845 set_control(non_array);
3846 uncommon_trap(Deoptimization::Reason_intrinsic,
3847 Deoptimization::Action_maybe_recompile);
3848 }
3849
3850 // If control is dead, only non-array-path is taken.
3851 if (stopped()) return true;
3852
  // This works fine even if the array type is polymorphic.
3854 // It could be a dynamic mix of int[], boolean[], Object[], etc.
3855 Node* result = load_array_length(array);
3856
3857 C->set_has_split_ifs(true); // Has chance for split-if optimization
3858 set_result(result);
3859 return true;
3860 }
3861
3862 //------------------------inline_array_copyOf----------------------------
3863 // public static <T,U> T[] java.util.Arrays.copyOf( U[] original, int newLength, Class<? extends T[]> newType);
3864 // public static <T,U> T[] java.util.Arrays.copyOfRange(U[] original, int from, int to, Class<? extends T[]> newType);
3865 bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
3866 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false;
3867
3868 // Get the arguments.
3869 Node* original = argument(0);
3870 Node* start = is_copyOfRange? argument(1): intcon(0);
3871 Node* end = is_copyOfRange? argument(2): argument(1);
3872 Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);
3873
3874 const TypeAryPtr* original_t = _gvn.type(original)->isa_aryptr();
3875 const TypeInstPtr* mirror_t = _gvn.type(array_type_mirror)->isa_instptr();
3876 if (EnableValhalla && ValueArrayFlatten &&
3877 (original_t == NULL || mirror_t == NULL ||
3878 (mirror_t->java_mirror_type() == NULL &&
3879 (original_t->elem()->isa_valuetype() ||
3880 (original_t->elem()->make_oopptr() != NULL &&
3881 original_t->elem()->make_oopptr()->can_be_value_type()))))) {
    // We need to know statically whether the copy targets a flattened
    // array, but we can't tell.
3884 return false;
3885 }
3886
3887 Node* newcopy = NULL;
3888
3889 // Set the original stack and the reexecute bit for the interpreter to reexecute
3890 // the bytecode that invokes Arrays.copyOf if deoptimization happens.
3891 { PreserveReexecuteState preexecs(this);
3892 jvms()->set_should_reexecute(true);
3893
3894 array_type_mirror = null_check(array_type_mirror);
3895 original = null_check(original);
3896
3897 // Check if a null path was taken unconditionally.
3898 if (stopped()) return true;
3899
3900 Node* orig_length = load_array_length(original);
3901
3902 Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0);
3903 klass_node = null_check(klass_node);
3904
3905 RegionNode* bailout = new RegionNode(1);
3906 record_for_igvn(bailout);
3907
3908 // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
3909 // Bail out if that is so.
    // A value type array may have object fields that would require a
    // write barrier. Conservatively, go to the slow path.
3912 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
3913 Node* not_objArray = !bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, BarrierSetC2::Parsing) ?
3914 generate_typeArray_guard(klass_node, bailout) : generate_non_objArray_guard(klass_node, bailout);
3915 if (not_objArray != NULL) {
3916 // Improve the klass node's type from the new optimistic assumption:
3917 ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
3918 const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0));
3919 Node* cast = new CastPPNode(klass_node, akls);
3920 cast->init_req(0, control());
3921 klass_node = _gvn.transform(cast);
3922 }
3923
3924 Node* original_kls = load_object_klass(original);
    // ArrayCopyNode::Ideal may transform the ArrayCopyNode to
3926 // loads/stores but it is legal only if we're sure the
3927 // Arrays.copyOf would succeed. So we need all input arguments
3928 // to the copyOf to be validated, including that the copy to the
3929 // new array won't trigger an ArrayStoreException. That subtype
3930 // check can be optimized if we know something on the type of
3931 // the input array from type speculation.
3932 if (_gvn.type(klass_node)->singleton() && !stopped()) {
3933 ciKlass* subk = _gvn.type(original_kls)->is_klassptr()->klass();
3934 ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass();
3935
3936 int test = C->static_subtype_check(superk, subk);
3937 if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
3938 const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
3939 if (t_original->speculative_type() != NULL) {
3940 original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
3941 original_kls = load_object_klass(original);
3942 }
3943 }
3944 }
3945
3946 if (EnableValhalla) {
      // The new array klass and the original array klass must either
      // both be flattened or both be non-flattened.
3949 Node* flattened_klass = generate_valueArray_guard(klass_node, NULL);
3950 generate_valueArray_guard(original_kls, bailout);
3951 if (flattened_klass != NULL) {
3952 RegionNode* r = new RegionNode(2);
3953 record_for_igvn(r);
3954 r->init_req(1, control());
3955 set_control(flattened_klass);
3956 generate_valueArray_guard(original_kls, r);
3957 bailout->add_req(control());
3958 set_control(_gvn.transform(r));
3959 }
3960 }
3961
3962 // Bail out if either start or end is negative.
3963 generate_negative_guard(start, bailout, &start);
3964 generate_negative_guard(end, bailout, &end);
3965
3966 Node* length = end;
3967 if (_gvn.type(start) != TypeInt::ZERO) {
3968 length = _gvn.transform(new SubINode(end, start));
3969 }
3970
3971 // Bail out if length is negative.
    // Without this, new_array would throw a NegativeArraySizeException,
    // but an IllegalArgumentException is what should be thrown.
3975 generate_negative_guard(length, bailout, &length);
3976
3977 if (bailout->req() > 1) {
3978 PreserveJVMState pjvms(this);
3979 set_control(_gvn.transform(bailout));
3980 uncommon_trap(Deoptimization::Reason_intrinsic,
3981 Deoptimization::Action_maybe_recompile);
3982 }
3983
3984 if (!stopped()) {
3985 // How many elements will we copy from the original?
3986 // The answer is MinI(orig_length - start, length).
3987 Node* orig_tail = _gvn.transform(new SubINode(orig_length, start));
3988 Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
3989
3990 original = access_resolve(original, ACCESS_READ);
3991
3992 // Generate a direct call to the right arraycopy function(s).
3993 // We know the copy is disjoint but we might not know if the
3994 // oop stores need checking.
3995 // Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class).
3996 // This will fail a store-check if x contains any non-nulls.
3997
3998 bool validated = false;
3999 // Reason_class_check rather than Reason_intrinsic because we
4000 // want to intrinsify even if this traps.
4001 if (!too_many_traps(Deoptimization::Reason_class_check)) {
4002 Node* not_subtype_ctrl = gen_subtype_check(original_kls,
4003 klass_node);
4004
4005 if (not_subtype_ctrl != top()) {
4006 PreserveJVMState pjvms(this);
4007 set_control(not_subtype_ctrl);
4008 uncommon_trap(Deoptimization::Reason_class_check,
4009 Deoptimization::Action_make_not_entrant);
4010 assert(stopped(), "Should be stopped");
4011 }
4012 validated = true;
4013 }
4014
4015 if (!stopped()) {
4016 newcopy = new_array(klass_node, length, 0); // no arguments to push
4017
4018 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
4019 original_kls, klass_node);
4020 if (!is_copyOfRange) {
4021 ac->set_copyof(validated);
4022 } else {
4023 ac->set_copyofrange(validated);
4024 }
4025 Node* n = _gvn.transform(ac);
4026 if (n == ac) {
4027 ac->connect_outputs(this);
4028 } else {
4029 assert(validated, "shouldn't transform if all arguments not validated");
4030 set_all_memory(n);
4031 }
4032 }
4033 }
4034 } // original reexecute is set back here
4035
4036 C->set_has_split_ifs(true); // Has chance for split-if optimization
4037 if (!stopped()) {
4038 set_result(newcopy);
4039 }
4040 return true;
4041 }
4042
4043
4044 //----------------------generate_virtual_guard---------------------------
4045 // Helper for hashCode and clone. Peeks inside the vtable to avoid a call.
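// Conceptually (an illustrative sketch, not the exact generated IR):
//   if (receiver_klass->vtable()[vtable_index].method() != expected_method)
//     goto slow_region;   // the method was overridden, take the out-of-line call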
4046 Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass,
4047 RegionNode* slow_region) {
4048 ciMethod* method = callee();
4049 int vtable_index = method->vtable_index();
4050 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
4051 "bad index %d", vtable_index);
4052 // Get the Method* out of the appropriate vtable entry.
4053 int entry_offset = in_bytes(Klass::vtable_start_offset()) +
4054 vtable_index*vtableEntry::size_in_bytes() +
4055 vtableEntry::method_offset_in_bytes();
4056 Node* entry_addr = basic_plus_adr(obj_klass, entry_offset);
4057 Node* target_call = make_load(NULL, entry_addr, TypePtr::NOTNULL, T_ADDRESS, MemNode::unordered);
4058
4059 // Compare the target method with the expected method (e.g., Object.hashCode).
4060 const TypePtr* native_call_addr = TypeMetadataPtr::make(method);
4061
4062 Node* native_call = makecon(native_call_addr);
4063 Node* chk_native = _gvn.transform(new CmpPNode(target_call, native_call));
4064 Node* test_native = _gvn.transform(new BoolNode(chk_native, BoolTest::ne));
4065
4066 return generate_slow_guard(test_native, slow_region);
4067 }
4068
4069 //-----------------------generate_method_call----------------------------
4070 // Use generate_method_call to make a slow-call to the real
4071 // method if the fast path fails. An alternative would be to
4072 // use a stub like OptoRuntime::slow_arraycopy_Java.
4073 // This only works for expanding the current library call,
4074 // not another intrinsic. (E.g., don't use this for making an
4075 // arraycopy call inside of the copyOf intrinsic.)
4076 CallJavaNode*
4077 LibraryCallKit::generate_method_call(vmIntrinsics::ID method_id, bool is_virtual, bool is_static) {
4078 // When compiling the intrinsic method itself, do not use this technique.
4079 guarantee(callee() != C->method(), "cannot make slow-call to self");
4080
4081 ciMethod* method = callee();
4082 // ensure the JVMS we have will be correct for this call
4083 guarantee(method_id == method->intrinsic_id(), "must match");
4084
4085 const TypeFunc* tf = TypeFunc::make(method);
4086 CallJavaNode* slow_call;
4087 if (is_static) {
4088 assert(!is_virtual, "");
4089 slow_call = new CallStaticJavaNode(C, tf,
4090 SharedRuntime::get_resolve_static_call_stub(),
4091 method, bci());
4092 } else if (is_virtual) {
4093 null_check_receiver();
4094 int vtable_index = Method::invalid_vtable_index;
4095 if (UseInlineCaches) {
4096 // Suppress the vtable call
4097 } else {
4098 // hashCode and clone are not miranda methods,
4099 // so the vtable index is fixed.
4100 // No need to use the linkResolver to get it.
4101 vtable_index = method->vtable_index();
4102 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
4103 "bad index %d", vtable_index);
4104 }
4105 slow_call = new CallDynamicJavaNode(tf,
4106 SharedRuntime::get_resolve_virtual_call_stub(),
4107 method, vtable_index, bci());
4108 } else { // neither virtual nor static: opt_virtual
4109 null_check_receiver();
4110 slow_call = new CallStaticJavaNode(C, tf,
4111 SharedRuntime::get_resolve_opt_virtual_call_stub(),
4112 method, bci());
4113 slow_call->set_optimized_virtual(true);
4114 }
4115 if (CallGenerator::is_inlined_method_handle_intrinsic(this->method(), bci(), callee())) {
4116 // To be able to issue a direct call (optimized virtual or virtual)
4117 // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
4118 // about the method being invoked should be attached to the call site to
4119 // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
4120 slow_call->set_override_symbolic_info(true);
4121 }
4122 set_arguments_for_java_call(slow_call);
4123 set_edges_for_java_call(slow_call);
4124 return slow_call;
4125 }
4126
4127
4128 /**
4129 * Build special case code for calls to hashCode on an object. This call may
4130 * be virtual (invokevirtual) or bound (invokespecial). For each case we generate
4131 * slightly different code.
4132 */
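// A rough Java-level sketch of the fast path built below (illustrative only,
// not the exact generated IR):
//   if (obj == null) return 0;                         // identityHashCode(null) only
//   mark = obj->mark();
//   if ((mark & biased_lock_mask) != unlocked_value)   goto slow;  // locked or biased
//   h = (int)(mark >>> hash_shift) & hash_mask;
//   if (h == no_hash)                                  goto slow;  // hash not installed yet
//   return h;
// where "slow" is an out-of-line call to hashCode()/identityHashCode().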
4133 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
4134 assert(is_static == callee()->is_static(), "correct intrinsic selection");
4135 assert(!(is_virtual && is_static), "either virtual, special, or static");
4136
4137 enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
4138
4139 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4140 PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT);
4141 PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
4142 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4143 Node* obj = argument(0);
4144
4145 if (obj->is_ValueType() || gvn().type(obj)->is_valuetypeptr()) {
4146 return false;
4147 }
4148
4149 if (!is_static) {
4150 // Check for hashing null object
4151 obj = null_check_receiver();
4152 if (stopped()) return true; // unconditionally null
4153 result_reg->init_req(_null_path, top());
4154 result_val->init_req(_null_path, top());
4155 } else {
4156 // Do a null check, and return zero if null.
4157 // System.identityHashCode(null) == 0
4158 Node* null_ctl = top();
4159 obj = null_check_oop(obj, &null_ctl);
4160 result_reg->init_req(_null_path, null_ctl);
4161 result_val->init_req(_null_path, _gvn.intcon(0));
4162 }
4163
4164 // Unconditionally null? Then return right away.
4165 if (stopped()) {
4166 set_control( result_reg->in(_null_path));
4167 if (!stopped())
4168 set_result(result_val->in(_null_path));
4169 return true;
4170 }
4171
4172 // We only go to the fast case code if we pass a number of guards. The
4173 // paths which do not pass are accumulated in the slow_region.
4174 RegionNode* slow_region = new RegionNode(1);
4175 record_for_igvn(slow_region);
4176
4177 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
4178 assert(!obj_type->isa_valuetype() || !obj_type->is_valuetypeptr(), "no value type here");
4179 if (is_static && obj_type->can_be_value_type()) {
4180 Node* obj_klass = load_object_klass(obj);
4181 generate_value_guard(obj_klass, slow_region);
4182 }
4183
4184 // If this is a virtual call, we generate a funny guard. We pull out
4185 // the vtable entry corresponding to hashCode() from the target object.
4186 // If the target method which we are calling happens to be the native
4187 // Object hashCode() method, we pass the guard. We do not need this
4188 // guard for non-virtual calls -- the caller is known to be the native
4189 // Object hashCode().
4190 if (is_virtual) {
4191 // After null check, get the object's klass.
4192 Node* obj_klass = load_object_klass(obj);
4193 generate_virtual_guard(obj_klass, slow_region);
4194 }
4195
4196 // Get the header out of the object, use LoadMarkNode when available
4197 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4198 // The control of the load must be NULL. Otherwise, the load can move before
4199 // the null check after castPP removal.
4200 Node* no_ctrl = NULL;
4201 Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4202
4203 // Test the header to see if it is unlocked.
4204 Node *lock_mask = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place);
4205 Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4206 Node *unlocked_val = _gvn.MakeConX(markOopDesc::unlocked_value);
4207 Node *chk_unlocked = _gvn.transform(new CmpXNode( lmasked_header, unlocked_val));
4208 Node *test_unlocked = _gvn.transform(new BoolNode( chk_unlocked, BoolTest::ne));
4209
4210 generate_slow_guard(test_unlocked, slow_region);
4211
4212 // Get the hash value and check to see that it has been properly assigned.
4213 // We depend on hash_mask being at most 32 bits and avoid the use of
4214 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4215 // VM: see markOop.hpp.
4216 Node *hash_mask = _gvn.intcon(markOopDesc::hash_mask);
4217 Node *hash_shift = _gvn.intcon(markOopDesc::hash_shift);
4218 Node *hshifted_header= _gvn.transform(new URShiftXNode(header, hash_shift));
4219 // This hack lets the hash bits live anywhere in the mark object now, as long
4220 // as the shift drops the relevant bits into the low 32 bits. Note that
4221 // the Java spec says that hashCode is an int, so there's no point in capturing
4222 // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
4223 hshifted_header = ConvX2I(hshifted_header);
4224 Node *hash_val = _gvn.transform(new AndINode(hshifted_header, hash_mask));
4225
4226 Node *no_hash_val = _gvn.intcon(markOopDesc::no_hash);
4227 Node *chk_assigned = _gvn.transform(new CmpINode( hash_val, no_hash_val));
4228 Node *test_assigned = _gvn.transform(new BoolNode( chk_assigned, BoolTest::eq));
4229
4230 generate_slow_guard(test_assigned, slow_region);
4231
4232 Node* init_mem = reset_memory();
4233 // fill in the rest of the null path:
4234 result_io ->init_req(_null_path, i_o());
4235 result_mem->init_req(_null_path, init_mem);
4236
4237 result_val->init_req(_fast_path, hash_val);
4238 result_reg->init_req(_fast_path, control());
4239 result_io ->init_req(_fast_path, i_o());
4240 result_mem->init_req(_fast_path, init_mem);
4241
4242 // Generate code for the slow case. We make a call to hashCode().
4243 set_control(_gvn.transform(slow_region));
4244 if (!stopped()) {
4245 // No need for PreserveJVMState, because we're using up the present state.
4246 set_all_memory(init_mem);
4247 vmIntrinsics::ID hashCode_id = is_static ? vmIntrinsics::_identityHashCode : vmIntrinsics::_hashCode;
4248 CallJavaNode* slow_call = generate_method_call(hashCode_id, is_virtual, is_static);
4249 Node* slow_result = set_results_for_java_call(slow_call);
4250 // this->control() comes from set_results_for_java_call
4251 result_reg->init_req(_slow_path, control());
4252 result_val->init_req(_slow_path, slow_result);
4253 result_io ->set_req(_slow_path, i_o());
4254 result_mem ->set_req(_slow_path, reset_memory());
4255 }
4256
4257 // Return the combined state.
4258 set_i_o( _gvn.transform(result_io) );
4259 set_all_memory( _gvn.transform(result_mem));
4260
4261 set_result(result_reg, result_val);
4262 return true;
4263 }
4264
4265 //---------------------------inline_native_getClass----------------------------
4266 // public final native Class<?> java.lang.Object.getClass();
4267 //
4268 // Build special case code for calls to getClass on an object.
4269 bool LibraryCallKit::inline_native_getClass() {
4270 Node* obj = argument(0);
4271 if (obj->is_ValueType()) {
4272 ciKlass* vk = _gvn.type(obj)->is_valuetype()->value_klass();
4273 set_result(makecon(TypeInstPtr::make(vk->java_mirror())));
4274 return true;
4275 }
4276 obj = null_check_receiver();
4277 if (stopped()) return true;
4278 set_result(load_mirror_from_klass(load_object_klass(obj)));
4279 return true;
4280 }
4281
4282 //-----------------inline_native_Reflection_getCallerClass---------------------
4283 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4284 //
4285 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4286 //
4287 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4288 // in that it must skip particular security frames and checks for
4289 // caller sensitive methods.
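// An illustrative (hypothetical) walk, assuming getCallerClass() was inlined into
// a @CallerSensitive method that was itself invoked reflectively:
//   n = 1: java.lang.Class.forName(String)       -- must be @CallerSensitive
//   n = 2: reflection frames (e.g. Method.invoke) -- ignored by the security stack walk
//   n = 3: com.example.UserCode.run()             -- first ordinary frame; its holder
//                                                    class is the result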
4290 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4291 #ifndef PRODUCT
4292 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4293 tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4294 }
4295 #endif
4296
4297 if (!jvms()->has_method()) {
4298 #ifndef PRODUCT
4299 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4300 tty->print_cr(" Bailing out because intrinsic was inlined at top level");
4301 }
4302 #endif
4303 return false;
4304 }
4305
4306 // Walk back up the JVM state to find the caller at the required
4307 // depth.
4308 JVMState* caller_jvms = jvms();
4309
4310 // Cf. JVM_GetCallerClass
4311 // NOTE: Start the loop at depth 1 because the current JVM state does
4312 // not include the Reflection.getCallerClass() frame.
4313 for (int n = 1; caller_jvms != NULL; caller_jvms = caller_jvms->caller(), n++) {
4314 ciMethod* m = caller_jvms->method();
4315 switch (n) {
4316 case 0:
4317 fatal("current JVM state does not include the Reflection.getCallerClass frame");
4318 break;
4319 case 1:
4320 // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
4321 if (!m->caller_sensitive()) {
4322 #ifndef PRODUCT
4323 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4324 tty->print_cr(" Bailing out: CallerSensitive annotation expected at frame %d", n);
4325 }
4326 #endif
4327 return false; // bail-out; let JVM_GetCallerClass do the work
4328 }
4329 break;
4330 default:
4331 if (!m->is_ignored_by_security_stack_walk()) {
4332 // We have reached the desired frame; return the holder class.
4333 // Acquire method holder as java.lang.Class and push as constant.
4334 ciInstanceKlass* caller_klass = caller_jvms->method()->holder();
4335 ciInstance* caller_mirror = caller_klass->java_mirror();
4336 set_result(makecon(TypeInstPtr::make(caller_mirror)));
4337
4338 #ifndef PRODUCT
4339 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4340 tty->print_cr(" Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
4341 tty->print_cr(" JVM state at this point:");
4342 for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
4343 ciMethod* m = jvms()->of_depth(i)->method();
4344 tty->print_cr(" %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
4345 }
4346 }
4347 #endif
4348 return true;
4349 }
4350 break;
4351 }
4352 }
4353
4354 #ifndef PRODUCT
4355 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4356 tty->print_cr(" Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
4357 tty->print_cr(" JVM state at this point:");
4358 for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
4359 ciMethod* m = jvms()->of_depth(i)->method();
4360 tty->print_cr(" %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
4361 }
4362 }
4363 #endif
4364
4365 return false; // bail-out; let JVM_GetCallerClass do the work
4366 }
4367
4368 bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
4369 Node* arg = argument(0);
4370 Node* result = NULL;
4371
4372 switch (id) {
4373 case vmIntrinsics::_floatToRawIntBits: result = new MoveF2INode(arg); break;
4374 case vmIntrinsics::_intBitsToFloat: result = new MoveI2FNode(arg); break;
4375 case vmIntrinsics::_doubleToRawLongBits: result = new MoveD2LNode(arg); break;
4376 case vmIntrinsics::_longBitsToDouble: result = new MoveL2DNode(arg); break;
4377
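// Unlike the raw variants above, doubleToLongBits/floatToIntBits must collapse
// every NaN to a single canonical bit pattern, e.g.
//   Double.doubleToLongBits(0.0d / 0.0d) == 0x7ff8000000000000L
//   Float.floatToIntBits(0.0f / 0.0f)    == 0x7fc00000
// which is why the two cases below branch on a NaN check first.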
4378 case vmIntrinsics::_doubleToLongBits: {
4379 // two paths (plus control) merge in a wood
4380 RegionNode *r = new RegionNode(3);
4381 Node *phi = new PhiNode(r, TypeLong::LONG);
4382
4383 Node *cmpisnan = _gvn.transform(new CmpDNode(arg, arg));
4384 // Build the boolean node
4385 Node *bolisnan = _gvn.transform(new BoolNode(cmpisnan, BoolTest::ne));
4386
4387 // Branch either way.
4388 // NaN case is less traveled, which makes all the difference.
4389 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
4390 Node *opt_isnan = _gvn.transform(ifisnan);
4391 assert( opt_isnan->is_If(), "Expect an IfNode");
4392 IfNode *opt_ifisnan = (IfNode*)opt_isnan;
4393 Node *iftrue = _gvn.transform(new IfTrueNode(opt_ifisnan));
4394
4395 set_control(iftrue);
4396
4397 static const jlong nan_bits = CONST64(0x7ff8000000000000);
4398 Node *slow_result = longcon(nan_bits); // return NaN
4399 phi->init_req(1, _gvn.transform( slow_result ));
4400 r->init_req(1, iftrue);
4401
4402 // Else fall through
4403 Node *iffalse = _gvn.transform(new IfFalseNode(opt_ifisnan));
4404 set_control(iffalse);
4405
4406 phi->init_req(2, _gvn.transform(new MoveD2LNode(arg)));
4407 r->init_req(2, iffalse);
4408
4409 // Post merge
4410 set_control(_gvn.transform(r));
4411 record_for_igvn(r);
4412
4413 C->set_has_split_ifs(true); // Has chance for split-if optimization
4414 result = phi;
4415 assert(result->bottom_type()->isa_long(), "must be");
4416 break;
4417 }
4418
4419 case vmIntrinsics::_floatToIntBits: {
4420 // two paths (plus control) merge in a wood
4421 RegionNode *r = new RegionNode(3);
4422 Node *phi = new PhiNode(r, TypeInt::INT);
4423
4424 Node *cmpisnan = _gvn.transform(new CmpFNode(arg, arg));
4425 // Build the boolean node
4426 Node *bolisnan = _gvn.transform(new BoolNode(cmpisnan, BoolTest::ne));
4427
4428 // Branch either way.
4429 // NaN case is less traveled, which makes all the difference.
4430 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
4431 Node *opt_isnan = _gvn.transform(ifisnan);
4432 assert( opt_isnan->is_If(), "Expect an IfNode");
4433 IfNode *opt_ifisnan = (IfNode*)opt_isnan;
4434 Node *iftrue = _gvn.transform(new IfTrueNode(opt_ifisnan));
4435
4436 set_control(iftrue);
4437
4438 static const jint nan_bits = 0x7fc00000;
4439 Node *slow_result = makecon(TypeInt::make(nan_bits)); // return NaN
4440 phi->init_req(1, _gvn.transform( slow_result ));
4441 r->init_req(1, iftrue);
4442
4443 // Else fall through
4444 Node *iffalse = _gvn.transform(new IfFalseNode(opt_ifisnan));
4445 set_control(iffalse);
4446
4447 phi->init_req(2, _gvn.transform(new MoveF2INode(arg)));
4448 r->init_req(2, iffalse);
4449
4450 // Post merge
4451 set_control(_gvn.transform(r));
4452 record_for_igvn(r);
4453
4454 C->set_has_split_ifs(true); // Has chance for split-if optimization
4455 result = phi;
4456 assert(result->bottom_type()->isa_int(), "must be");
4457 break;
4458 }
4459
4460 default:
4461 fatal_unexpected_iid(id);
4462 break;
4463 }
4464 set_result(_gvn.transform(result));
4465 return true;
4466 }
4467
4468 //----------------------inline_unsafe_copyMemory-------------------------
4469 // public native void Unsafe.copyMemory0(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
4470 bool LibraryCallKit::inline_unsafe_copyMemory() {
4471 if (callee()->is_static()) return false; // caller must have the capability!
4472 null_check_receiver(); // null-check receiver
4473 if (stopped()) return true;
4474
4475 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
4476
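// Note: arguments are fetched from slots 1, 2, 4, 5 and 7 because the receiver
// occupies slot 0 and each long argument occupies two slots.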
4477 Node* src_ptr = argument(1); // type: oop
4478 Node* src_off = ConvL2X(argument(2)); // type: long
4479 Node* dst_ptr = argument(4); // type: oop
4480 Node* dst_off = ConvL2X(argument(5)); // type: long
4481 Node* size = ConvL2X(argument(7)); // type: long
4482
4483 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
4484 "fieldOffset must be byte-scaled");
4485
4486 src_ptr = access_resolve(src_ptr, ACCESS_READ);
4487 dst_ptr = access_resolve(dst_ptr, ACCESS_WRITE);
4488 Node* src = make_unsafe_address(src_ptr, src_off, ACCESS_READ);
4489 Node* dst = make_unsafe_address(dst_ptr, dst_off, ACCESS_WRITE);
4490
4491 // Conservatively insert a memory barrier on all memory slices.
4492 // Do not let writes of the copy source or destination float below the copy.
4493 insert_mem_bar(Op_MemBarCPUOrder);
4494
4495 // Call it. Note that the length argument is not scaled.
4496 make_runtime_call(RC_LEAF|RC_NO_FP,
4497 OptoRuntime::fast_arraycopy_Type(),
4498 StubRoutines::unsafe_arraycopy(),
4499 "unsafe_arraycopy",
4500 TypeRawPtr::BOTTOM,
4501 src, dst, size XTOP);
4502
4503 // Do not let reads of the copy destination float above the copy.
4504 insert_mem_bar(Op_MemBarCPUOrder);
4505
4506 return true;
4507 }
4508
4509 //------------------------copy_to_clone-----------------------------------
4510 // Helper function for inline_native_clone.
4511 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) {
4512 assert(obj_size != NULL, "");
4513 Node* raw_obj = alloc_obj->in(1);
4514 assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
4515
4516 AllocateNode* alloc = NULL;
4517 if (ReduceBulkZeroing) {
4518 // We will be completely responsible for initializing this object -
4519 // mark Initialize node as complete.
4520 alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
4521 // The object was just allocated - there should not be any stores yet!
4522 guarantee(alloc != NULL && alloc->maybe_set_complete(&_gvn), "");
4523 // Mark as complete_with_arraycopy so that on AllocateNode
4524 // expansion, we know this AllocateNode is initialized by an array
4525 // copy and a StoreStore barrier exists after the array copy.
4526 alloc->initialization()->set_complete_with_arraycopy();
4527 }
4528
4529 // Copy the fastest available way.
4530 // TODO: generate fields copies for small objects instead.
4531 Node* size = _gvn.transform(obj_size);
4532
4533 // Exclude the header but include the array length, to copy in 8-byte words.
4534 // Can't use base_offset_in_bytes(bt) since basic type is unknown.
4535 int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
4536 instanceOopDesc::base_offset_in_bytes();
4537 // base_off:
4538 // 8 - 32-bit VM
4539 // 12 - 64-bit VM, compressed klass
4540 // 16 - 64-bit VM, normal klass
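// For example, with the typical 64-bit compressed-klass layout (mark word at
// offsets 0..7, narrow klass at 8..11) the adjustment below yields:
//   instance: base_off 12 -> klass_offset_in_bytes() == 8 (klass word is copied too)
//   array:    base_off 12 -> 16                           (length word is skipped)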
4541 if (base_off % BytesPerLong != 0) {
4542 assert(UseCompressedClassPointers, "");
4543 if (is_array) {
4544 // Exclude the length to copy in 8-byte words.
4545 base_off += sizeof(int);
4546 } else {
4547 // Include the klass to copy in 8-byte words.
4548 base_off = instanceOopDesc::klass_offset_in_bytes();
4549 }
4550 assert(base_off % BytesPerLong == 0, "expect 8-byte alignment");
4551 }
4552 Node* src_base = basic_plus_adr(obj, base_off);
4553 Node* dst_base = basic_plus_adr(alloc_obj, base_off);
4554
4555 // Compute the length also, if needed:
4556 Node* countx = size;
4557 countx = _gvn.transform(new SubXNode(countx, MakeConX(base_off)));
4558 countx = _gvn.transform(new URShiftXNode(countx, intcon(LogBytesPerLong)));
4559
4560 access_clone(src_base, dst_base, countx, is_array);
4561
4562 // Do not let reads from the cloned object float above the arraycopy.
4563 if (alloc != NULL) {
4564 // Do not let stores that initialize this object be reordered with
4565 // a subsequent store that would make this object accessible by
4566 // other threads.
4567 // Record what AllocateNode this StoreStore protects so that
4568 // escape analysis can go from the MemBarStoreStoreNode to the
4569 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
4570 // based on the escape status of the AllocateNode.
4571 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
4572 } else {
4573 insert_mem_bar(Op_MemBarCPUOrder);
4574 }
4575 }
4576
4577 //------------------------inline_native_clone----------------------------
4578 // protected native Object java.lang.Object.clone();
4579 //
4580 // Here are the simple edge cases:
4581 // null receiver => normal trap
4582 // virtual and clone was overridden => slow path to out-of-line clone
4583 // not cloneable or finalizer => slow path to out-of-line Object.clone
4584 //
4585 // The general case has two steps, allocation and copying.
4586 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
4587 //
4588 // Copying also has two cases, oop arrays and everything else.
4589 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
4590 // Everything else uses the tight inline loop supplied by CopyArrayNode.
4591 //
4592 // These steps fold up nicely if and when the cloned object's klass
4593 // can be sharply typed as an object array, a type array, or an instance.
4594 //
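// Roughly, the paths built below correspond to (illustrative pseudocode only):
//   if (obj is an array) {
//     copy = new_array(klass, length);
//     if (elements may need GC barriers)  arraycopy with barriers;   // _objArray_path
//     else                                raw copy in 8-byte words;  // _array_path
//   } else if (cloneable, no finalizer, and clone() not overridden) {
//     copy = new_instance(klass); raw copy of the fields;            // _instance_path
//   } else {
//     copy = <out-of-line call to clone()>;                          // _slow_path
//   }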
4595 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
4596 PhiNode* result_val;
4597
4598 // Set the reexecute bit for the interpreter to reexecute
4599 // the bytecode that invokes Object.clone if deoptimization happens.
4600 { PreserveReexecuteState preexecs(this);
4601 jvms()->set_should_reexecute(true);
4602
4603 Node* obj = argument(0);
4604 if (obj->is_ValueType()) {
4605 return false;
4606 }
4607
4608 obj = null_check_receiver();
4609 if (stopped()) return true;
4610
4611 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
4612
4613 // If we are going to clone an instance, we need its exact type to
4614 // know the number and types of fields to convert the clone to
4615 // loads/stores. Maybe a speculative type can help us.
4616 if (!obj_type->klass_is_exact() &&
4617 obj_type->speculative_type() != NULL &&
4618 obj_type->speculative_type()->is_instance_klass() &&
4619 !obj_type->speculative_type()->is_valuetype()) {
4620 ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
4621 if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
4622 !spec_ik->has_injected_fields()) {
4623 ciKlass* k = obj_type->klass();
4624 if (!k->is_instance_klass() ||
4625 k->as_instance_klass()->is_interface() ||
4626 k->as_instance_klass()->has_subklass()) {
4627 obj = maybe_cast_profiled_obj(obj, obj_type->speculative_type(), false);
4628 }
4629 }
4630 }
4631
4632 Node* obj_klass = load_object_klass(obj);
4633 const TypeKlassPtr* tklass = _gvn.type(obj_klass)->isa_klassptr();
4634 const TypeOopPtr* toop = ((tklass != NULL)
4635 ? tklass->as_instance_type()
4636 : TypeInstPtr::NOTNULL);
4637
4638 // Conservatively insert a memory barrier on all memory slices.
4639 // Do not let writes into the original float below the clone.
4640 insert_mem_bar(Op_MemBarCPUOrder);
4641
4642 // paths into result_reg:
4643 enum {
4644 _slow_path = 1, // out-of-line call to clone method (virtual or not)
4645 _objArray_path, // plain array allocation, plus arrayof_oop_arraycopy
4646 _array_path, // plain array allocation, plus arrayof_long_arraycopy
4647 _instance_path, // plain instance allocation, plus arrayof_long_arraycopy
4648 PATH_LIMIT
4649 };
4650 RegionNode* result_reg = new RegionNode(PATH_LIMIT);
4651 result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
4652 PhiNode* result_i_o = new PhiNode(result_reg, Type::ABIO);
4653 PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
4654 record_for_igvn(result_reg);
4655
4656 // We only go to the fast case code if we pass a number of guards.
4657 // The paths which do not pass are accumulated in the slow_region.
4658 RegionNode* slow_region = new RegionNode(1);
4659 record_for_igvn(slow_region);
4660
4661 Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
4662 if (array_ctl != NULL) {
4663 // It's an array.
4664 PreserveJVMState pjvms(this);
4665 set_control(array_ctl);
4666
4667 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4668 if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, BarrierSetC2::Parsing)) {
4669 // A value type array may have an object field that would require a
4670 // write barrier. Conservatively, go to the slow path.
4671 generate_valueArray_guard(obj_klass, slow_region);
4672 }
4673
4674 if (!stopped()) {
4675 Node* obj_length = load_array_length(obj);
4676 Node* obj_size = NULL;
4677 Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size); // no arguments to push
4678
4679 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4680 if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, BarrierSetC2::Parsing)) {
4681 // If it is an oop array, it requires very special treatment,
4682 // because gc barriers are required when accessing the array.
4683 Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
4684 if (is_obja != NULL) {
4685 PreserveJVMState pjvms2(this);
4686 set_control(is_obja);
4687 // Generate a direct call to the right arraycopy function(s).
4688 Node* alloc = tightly_coupled_allocation(alloc_obj, NULL);
4689 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, alloc != NULL, false);
4690 ac->set_cloneoop();
4691 Node* n = _gvn.transform(ac);
4692 assert(n == ac, "cannot disappear");
4693 ac->connect_outputs(this);
4694
4695 result_reg->init_req(_objArray_path, control());
4696 result_val->init_req(_objArray_path, alloc_obj);
4697 result_i_o ->set_req(_objArray_path, i_o());
4698 result_mem ->set_req(_objArray_path, reset_memory());
4699 }
4700 }
4701
4702 // Otherwise, there are no barriers to worry about.
4703 // (We can dispense with card marks if we know the allocation
4704 // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks
4705 // causes the non-eden paths to take compensating steps to
4706 // simulate a fresh allocation, so that no further
4707 // card marks are required in compiled code to initialize
4708 // the object.)
4709
4710 if (!stopped()) {
4711 copy_to_clone(obj, alloc_obj, obj_size, true);
4712
4713 // Present the results of the copy.
4714 result_reg->init_req(_array_path, control());
4715 result_val->init_req(_array_path, alloc_obj);
4716 result_i_o ->set_req(_array_path, i_o());
4717 result_mem ->set_req(_array_path, reset_memory());
4718 }
4719 }
4720 }
4721
4722 if (!stopped()) {
4723 // It's an instance (we did array above). Make the slow-path tests.
4724 // If this is a virtual call, we generate a funny guard. We grab
4725 // the vtable entry corresponding to clone() from the target object.
4726 // If the target method which we are calling happens to be the
4727 // Object clone() method, we pass the guard. We do not need this
4728 // guard for non-virtual calls; the caller is known to be the native
4729 // Object clone().
4730 if (is_virtual) {
4731 generate_virtual_guard(obj_klass, slow_region);
4732 }
4733
4734 // The object must be easily cloneable and must not have a finalizer.
4735 // Both of these conditions may be checked in a single test.
4736 // We could optimize the test further, but we don't care.
4737 generate_access_flags_guard(obj_klass,
4738 // Test both conditions:
4739 JVM_ACC_IS_CLONEABLE_FAST | JVM_ACC_HAS_FINALIZER,
4740 // Must be cloneable but not finalizer:
4741 JVM_ACC_IS_CLONEABLE_FAST,
4742 slow_region);
4743 }
4744
4745 if (!stopped()) {
4746 // It's an instance, and it passed the slow-path tests.
4747 PreserveJVMState pjvms(this);
4748 Node* obj_size = NULL;
4749 // Need to deoptimize on exception from allocation since Object.clone intrinsic
4750 // is reexecuted if deoptimization occurs and there could be problems when merging
4751 // exception state between multiple Object.clone versions (reexecute=true vs reexecute=false).
4752 Node* alloc_obj = new_instance(obj_klass, NULL, &obj_size, /*deoptimize_on_exception=*/true);
4753
4754 copy_to_clone(obj, alloc_obj, obj_size, false);
4755
4756 // Present the results of the slow call.
4757 result_reg->init_req(_instance_path, control());
4758 result_val->init_req(_instance_path, alloc_obj);
4759 result_i_o ->set_req(_instance_path, i_o());
4760 result_mem ->set_req(_instance_path, reset_memory());
4761 }
4762
4763 // Generate code for the slow case. We make a call to clone().
4764 set_control(_gvn.transform(slow_region));
4765 if (!stopped()) {
4766 PreserveJVMState pjvms(this);
4767 CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_clone, is_virtual);
4768 // We need to deoptimize on exception (see comment above)
4769 Node* slow_result = set_results_for_java_call(slow_call, false, /* deoptimize */ true);
4770 // this->control() comes from set_results_for_java_call
4771 result_reg->init_req(_slow_path, control());
4772 result_val->init_req(_slow_path, slow_result);
4773 result_i_o ->set_req(_slow_path, i_o());
4774 result_mem ->set_req(_slow_path, reset_memory());
4775 }
4776
4777 // Return the combined state.
4778 set_control( _gvn.transform(result_reg));
4779 set_i_o( _gvn.transform(result_i_o));
4780 set_all_memory( _gvn.transform(result_mem));
4781 } // original reexecute is set back here
4782
4783 set_result(_gvn.transform(result_val));
4784 return true;
4785 }
4786
4787 // If we have a tightly coupled allocation, the arraycopy may take care
4788 // of the array initialization. If one of the guards we insert between
4789 // the allocation and the arraycopy causes a deoptimization, an
4790 // uninitialized array will escape the compiled method. To prevent that
4791 // we set the JVM state for uncommon traps between the allocation and
4792 // the arraycopy to the state before the allocation so, in case of
4793 // deoptimization, we'll reexecute the allocation and the
4794 // initialization.
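// Illustrative Java-level scenario (simplified):
//   int[] dest = new int[len];               // tightly coupled allocation
//   System.arraycopy(src, 0, dest, 0, len);  // a guard emitted here may deoptimize
// If such a guard trapped with the JVM state taken after the allocation, the
// interpreter would resume with a reference to an array whose elements were never
// zeroed; using the pre-allocation state re-runs the allocation as well.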
4795 JVMState* LibraryCallKit::arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp) {
4796 if (alloc != NULL) {
4797 ciMethod* trap_method = alloc->jvms()->method();
4798 int trap_bci = alloc->jvms()->bci();
4799
4800 if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
4801 !C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_null_check)) {
4802 // Make sure there's no store between the allocation and the
4803 // arraycopy, otherwise visible side effects could be re-executed
4804 // in case of deoptimization and cause incorrect execution.
4805 bool no_interfering_store = true;
4806 Node* mem = alloc->in(TypeFunc::Memory);
4807 if (mem->is_MergeMem()) {
4808 for (MergeMemStream mms(merged_memory(), mem->as_MergeMem()); mms.next_non_empty2(); ) {
4809 Node* n = mms.memory();
4810 if (n != mms.memory2() && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
4811 assert(n->is_Store() || n->Opcode() == Op_ShenandoahWBMemProj, "what else?");
4812 no_interfering_store = false;
4813 break;
4814 }
4815 }
4816 } else {
4817 for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
4818 Node* n = mms.memory();
4819 if (n != mem && !(n->is_Proj() && n->in(0) == alloc->initialization())) {
4820 assert(n->is_Store() || n->Opcode() == Op_ShenandoahWBMemProj, "what else?");
4821 no_interfering_store = false;
4822 break;
4823 }
4824 }
4825 }
4826
4827 if (no_interfering_store) {
4828 JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
4829 uint size = alloc->req();
4830 SafePointNode* sfpt = new SafePointNode(size, old_jvms);
4831 old_jvms->set_map(sfpt);
4832 for (uint i = 0; i < size; i++) {
4833 sfpt->init_req(i, alloc->in(i));
4834 }
4835 // re-push array length for deoptimization
4836 sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), alloc->in(AllocateNode::ALength));
4837 old_jvms->set_sp(old_jvms->sp()+1);
4838 old_jvms->set_monoff(old_jvms->monoff()+1);
4839 old_jvms->set_scloff(old_jvms->scloff()+1);
4840 old_jvms->set_endoff(old_jvms->endoff()+1);
4841 old_jvms->set_should_reexecute(true);
4842
4843 sfpt->set_i_o(map()->i_o());
4844 sfpt->set_memory(map()->memory());
4845 sfpt->set_control(map()->control());
4846
4847 JVMState* saved_jvms = jvms();
4848 saved_reexecute_sp = _reexecute_sp;
4849
4850 set_jvms(sfpt->jvms());
4851 _reexecute_sp = jvms()->sp();
4852
4853 return saved_jvms;
4854 }
4855 }
4856 }
4857 return NULL;
4858 }
4859
4860 // In case of a deoptimization, we restart execution at the
4861 // allocation, allocating a new array. We would leave an uninitialized
4862 // array in the heap that GCs wouldn't expect. Move the allocation
4863 // after the traps so we don't allocate the array if we
4864 // deoptimize. This is possible because tightly_coupled_allocation()
4865 // guarantees there's no observer of the allocated array at this point
4866 // and the control flow is simple enough.
4867 void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms,
4868 int saved_reexecute_sp, uint new_idx) {
4869 if (saved_jvms != NULL && !stopped()) {
4870 assert(alloc != NULL, "only with a tightly coupled allocation");
4871 // restore JVM state to the state at the arraycopy
4872 saved_jvms->map()->set_control(map()->control());
4873 assert(saved_jvms->map()->memory() == map()->memory(), "memory state changed?");
4874 assert(saved_jvms->map()->i_o() == map()->i_o(), "IO state changed?");
4875 // If we've improved the types of some nodes (null check) while
4876 // emitting the guards, propagate them to the current state
4877 map()->replaced_nodes().apply(saved_jvms->map(), new_idx);
4878 set_jvms(saved_jvms);
4879 _reexecute_sp = saved_reexecute_sp;
4880
4881 // Remove the allocation from above the guards
4882 CallProjections* callprojs = alloc->extract_projections(true);
4883 InitializeNode* init = alloc->initialization();
4884 Node* alloc_mem = alloc->in(TypeFunc::Memory);
4885 C->gvn_replace_by(callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
4886 C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
4887 C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
4888
4889 // move the allocation here (after the guards)
4890 _gvn.hash_delete(alloc);
4891 alloc->set_req(TypeFunc::Control, control());
4892 alloc->set_req(TypeFunc::I_O, i_o());
4893 Node *mem = reset_memory();
4894 set_all_memory(mem);
4895 alloc->set_req(TypeFunc::Memory, mem);
4896 set_control(init->proj_out_or_null(TypeFunc::Control));
4897 set_i_o(callprojs->fallthrough_ioproj);
4898
4899 // Update memory as done in GraphKit::set_output_for_allocation()
4900 const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
4901 const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
4902 if (ary_type->isa_aryptr() && length_type != NULL) {
4903 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
4904 }
4905 const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot);
4906 int elemidx = C->get_alias_index(telemref);
4907 set_memory(init->proj_out_or_null(TypeFunc::Memory), Compile::AliasIdxRaw);
4908 set_memory(init->proj_out_or_null(TypeFunc::Memory), elemidx);
4909
4910 Node* allocx = _gvn.transform(alloc);
4911 assert(allocx == alloc, "where has the allocation gone?");
4912 assert(dest->is_CheckCastPP(), "not an allocation result?");
4913
4914 _gvn.hash_delete(dest);
4915 dest->set_req(0, control());
4916 Node* destx = _gvn.transform(dest);
4917 assert(destx == dest, "where has the allocation result gone?");
4918 }
4919 }
4920
4921
4922 //------------------------------inline_arraycopy-----------------------
4923 // public static native void java.lang.System.arraycopy(Object src, int srcPos,
4924 // Object dest, int destPos,
4925 // int length);
4926 bool LibraryCallKit::inline_arraycopy() {
4927 // Get the arguments.
4928 Node* src = argument(0); // type: oop
4929 Node* src_offset = argument(1); // type: int
4930 Node* dest = argument(2); // type: oop
4931 Node* dest_offset = argument(3); // type: int
4932 Node* length = argument(4); // type: int
4933
4934 uint new_idx = C->unique();
4935
4936 // Check for allocation before we add nodes that would confuse
4937 // tightly_coupled_allocation()
4938 AllocateArrayNode* alloc = tightly_coupled_allocation(dest, NULL);
4939
4940 int saved_reexecute_sp = -1;
4941 JVMState* saved_jvms = arraycopy_restore_alloc_state(alloc, saved_reexecute_sp);
4942 // See arraycopy_restore_alloc_state() comment
4943 // if alloc == NULL we don't have to worry about a tightly coupled allocation so we can emit all needed guards
4944 // if saved_jvms != NULL (then alloc != NULL) then we can handle guards and a tightly coupled allocation
4945 // if saved_jvms == NULL and alloc != NULL, we can't emit any guards
4946 bool can_emit_guards = (alloc == NULL || saved_jvms != NULL);
4947
4948 // The following tests must be performed
4949 // (1) src and dest are arrays.
4950 // (2) src and dest arrays must have elements of the same BasicType
4951 // (3) src and dest must not be null.
4952 // (4) src_offset must not be negative.
4953 // (5) dest_offset must not be negative.
4954 // (6) length must not be negative.
4955 // (7) src_offset + length must not exceed length of src.
4956 // (8) dest_offset + length must not exceed length of dest.
4957 // (9) each element of an oop array must be assignable
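// For example (illustrative):
//   System.arraycopy(new Integer[] { 1 }, 0, new String[1], 0, 1)
// must throw ArrayStoreException, which is why (9) may require a subtype or
// per-element check for oop arrays.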
4958
4959 // (3) src and dest must not be null.
4960 // always do this here because we need the JVM state for uncommon traps
4961 Node* null_ctl = top();
4962 src = saved_jvms != NULL ? null_check_oop(src, &null_ctl, true, true) : null_check(src, T_ARRAY);
4963 assert(null_ctl->is_top(), "no null control here");
4964 dest = null_check(dest, T_ARRAY);
4965
4966 if (!can_emit_guards) {
4967 // if saved_jvms == NULL and alloc != NULL, we don't emit any
4968 // guards but the arraycopy node could still take advantage of a
4969 // tightly coupled allocation. tightly_coupled_allocation() is
4970 // called again to make sure it takes the null check above into
4971 // account: the null check is mandatory and if it caused an
4972 // uncommon trap to be emitted then the allocation can't be
4973 // considered tightly coupled in this context.
4974 alloc = tightly_coupled_allocation(dest, NULL);
4975 }
4976
4977 bool validated = false;
4978
4979 const Type* src_type = _gvn.type(src);
4980 const Type* dest_type = _gvn.type(dest);
4981 const TypeAryPtr* top_src = src_type->isa_aryptr();
4982 const TypeAryPtr* top_dest = dest_type->isa_aryptr();
4983
4984 // Do we have the type of src?
4985 bool has_src = (top_src != NULL && top_src->klass() != NULL);
4986 // Do we have the type of dest?
4987 bool has_dest = (top_dest != NULL && top_dest->klass() != NULL);
4988 // Is the type for src from speculation?
4989 bool src_spec = false;
4990 // Is the type for dest from speculation?
4991 bool dest_spec = false;
4992
4993 if ((!has_src || !has_dest) && can_emit_guards) {
4994 // We don't have sufficient type information, let's see if
4995 // speculative types can help. We need to have types for both src
4996 // and dest so that it pays off.
4997
4998 // Do we already have or could we have type information for src
4999 bool could_have_src = has_src;
5000 // Do we already have or could we have type information for dest
5001 bool could_have_dest = has_dest;
5002
5003 ciKlass* src_k = NULL;
5004 if (!has_src) {
5005 src_k = src_type->speculative_type_not_null();
5006 if (src_k != NULL && src_k->is_array_klass()) {
5007 could_have_src = true;
5008 }
5009 }
5010
5011 ciKlass* dest_k = NULL;
5012 if (!has_dest) {
5013 dest_k = dest_type->speculative_type_not_null();
5014 if (dest_k != NULL && dest_k->is_array_klass()) {
5015 could_have_dest = true;
5016 }
5017 }
5018
5019 if (could_have_src && could_have_dest) {
5020 // This is going to pay off so emit the required guards
5021 if (!has_src) {
5022 src = maybe_cast_profiled_obj(src, src_k, true);
5023 src_type = _gvn.type(src);
5024 top_src = src_type->isa_aryptr();
5025 has_src = (top_src != NULL && top_src->klass() != NULL);
5026 src_spec = true;
5027 }
5028 if (!has_dest) {
5029 dest = maybe_cast_profiled_obj(dest, dest_k, true);
5030 dest_type = _gvn.type(dest);
5031 top_dest = dest_type->isa_aryptr();
5032 has_dest = (top_dest != NULL && top_dest->klass() != NULL);
5033 dest_spec = true;
5034 }
5035 }
5036 }
5037
5038 if (has_src && has_dest && can_emit_guards) {
5039 BasicType src_elem = top_src->klass()->as_array_klass()->element_type()->basic_type();
5040 BasicType dest_elem = top_dest->klass()->as_array_klass()->element_type()->basic_type();
5041 if (src_elem == T_ARRAY) src_elem = T_OBJECT;
5042 if (dest_elem == T_ARRAY) dest_elem = T_OBJECT;
5043
5044 if (src_elem == dest_elem && src_elem == T_OBJECT) {
5045 // If both arrays are object arrays then having the exact types
5046 // for both will remove the need for a subtype check at runtime
5047 // before the call and may make it possible to pick a faster copy
5048 // routine (without a subtype check on every element)
5049 // Do we have the exact type of src?
5050 bool could_have_src = src_spec;
5051 // Do we have the exact type of dest?
5052 bool could_have_dest = dest_spec;
5053 ciKlass* src_k = top_src->klass();
5054 ciKlass* dest_k = top_dest->klass();
5055 if (!src_spec) {
5056 src_k = src_type->speculative_type_not_null();
5057 if (src_k != NULL && src_k->is_array_klass()) {
5058 could_have_src = true;
5059 }
5060 }
5061 if (!dest_spec) {
5062 dest_k = dest_type->speculative_type_not_null();
5063 if (dest_k != NULL && dest_k->is_array_klass()) {
5064 could_have_dest = true;
5065 }
5066 }
5067 if (could_have_src && could_have_dest) {
5068 // If we can have both exact types, emit the missing guards
5069 if (could_have_src && !src_spec) {
5070 src = maybe_cast_profiled_obj(src, src_k, true);
5071 }
5072 if (could_have_dest && !dest_spec) {
5073 dest = maybe_cast_profiled_obj(dest, dest_k, true);
5074 }
5075 }
5076 }
5077 }
5078
5079 ciMethod* trap_method = method();
5080 int trap_bci = bci();
5081 if (saved_jvms != NULL) {
5082 trap_method = alloc->jvms()->method();
5083 trap_bci = alloc->jvms()->bci();
5084 }
5085
5086 bool negative_length_guard_generated = false;
5087
5088 if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) &&
5089 can_emit_guards &&
5090 !src->is_top() && !dest->is_top()) {
5091 // validate arguments: enables transformation of the ArrayCopyNode
5092 validated = true;
5093
5094 RegionNode* slow_region = new RegionNode(1);
5095 record_for_igvn(slow_region);
5096
5097 // (1) src and dest are arrays.
5098 generate_non_array_guard(load_object_klass(src), slow_region);
5099 generate_non_array_guard(load_object_klass(dest), slow_region);
5100
5101 // (2) src and dest arrays must have elements of the same BasicType
5102 // done at macro expansion or at Ideal transformation time
5103
5104 // (4) src_offset must not be negative.
5105 generate_negative_guard(src_offset, slow_region);
5106
5107 // (5) dest_offset must not be negative.
5108 generate_negative_guard(dest_offset, slow_region);
5109
5110 // (7) src_offset + length must not exceed length of src.
5111 generate_limit_guard(src_offset, length,
5112 load_array_length(src),
5113 slow_region);
5114
5115 // (8) dest_offset + length must not exceed length of dest.
5116 generate_limit_guard(dest_offset, length,
5117 load_array_length(dest),
5118 slow_region);
5119
5120 // (6) length must not be negative.
5121 // This is also checked in generate_arraycopy() during macro expansion, but
5122 // we have to check it here as well for the case where the ArrayCopyNode
5123 // will be eliminated by Escape Analysis.
5124 if (EliminateAllocations) {
5125 generate_negative_guard(length, slow_region);
5126 negative_length_guard_generated = true;
5127 }
5128
5129 // (9) each element of an oop array must be assignable
5130 Node* src_klass = load_object_klass(src);
5131 Node* dest_klass = load_object_klass(dest);
5132 Node* not_subtype_ctrl = gen_subtype_check(src_klass, dest_klass);
5133
5134 if (not_subtype_ctrl != top()) {
5135 PreserveJVMState pjvms(this);
5136 set_control(not_subtype_ctrl);
5137 uncommon_trap(Deoptimization::Reason_intrinsic,
5138 Deoptimization::Action_make_not_entrant);
5139 assert(stopped(), "Should be stopped");
5140 }
5141
5142 const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
5143 const Type *toop = TypeOopPtr::make_from_klass(dest_klass_t->klass());
5144 src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
5145
5146 src_type = _gvn.type(src);
5147 top_src = src_type->isa_aryptr();
5148
5149 if (top_dest != NULL &&
5150 top_dest->elem()->make_oopptr() != NULL &&
5151 top_dest->elem()->make_oopptr()->can_be_value_type()) {
5152 generate_valueArray_guard(load_object_klass(dest), slow_region);
5153 }
5154
5155 if (top_src != NULL &&
5156 top_src->elem()->make_oopptr() != NULL &&
5157 top_src->elem()->make_oopptr()->can_be_value_type()) {
5158 generate_valueArray_guard(load_object_klass(src), slow_region);
5159 }
5160
5161 {
5162 PreserveJVMState pjvms(this);
5163 set_control(_gvn.transform(slow_region));
5164 uncommon_trap(Deoptimization::Reason_intrinsic,
5165 Deoptimization::Action_make_not_entrant);
5166 assert(stopped(), "Should be stopped");
5167 }
5168 }
5169
5170 arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp, new_idx);
5171
5172 if (stopped()) {
5173 return true;
5174 }
5175
5176 Node* new_src = access_resolve(src, ACCESS_READ);
5177 Node* new_dest = access_resolve(dest, ACCESS_WRITE);
5178
5179 ArrayCopyNode* ac = ArrayCopyNode::make(this, true, new_src, src_offset, new_dest, dest_offset, length, alloc != NULL, negative_length_guard_generated,
5180 // Create LoadRange and LoadKlass nodes for use during macro expansion here
5181 // so the compiler has a chance to eliminate them: during macro expansion,
5182 // we have to set their control (CastPP nodes are eliminated).
5183 load_object_klass(src), load_object_klass(dest),
5184 load_array_length(src), load_array_length(dest));
5185
5186 ac->set_arraycopy(validated);
5187
5188 Node* n = _gvn.transform(ac);
5189 if (n == ac) {
5190 ac->connect_outputs(this);
5191 } else {
5192 assert(validated, "shouldn't transform if all arguments not validated");
5193 set_all_memory(n);
5194 }
5195 clear_upper_avx();
5196
5197
5198 return true;
5199 }
5200
5201
5202 // Helper function which determines if an arraycopy immediately follows
5203 // an allocation, with no intervening tests or other escapes for the object.
5204 AllocateArrayNode*
5205 LibraryCallKit::tightly_coupled_allocation(Node* ptr,
5206 RegionNode* slow_region) {
5207 if (stopped()) return NULL; // no fast path
5208 if (C->AliasLevel() == 0) return NULL; // no MergeMems around
5209
5210 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, &_gvn);
5211 if (alloc == NULL) return NULL;
5212
5213 Node* rawmem = memory(Compile::AliasIdxRaw);
5214 // Is the allocation's memory state untouched?
5215 if (!(rawmem->is_Proj() && rawmem->in(0)->is_Initialize())) {
5216 // Bail out if there have been raw-memory effects since the allocation.
5217 // (Example: There might have been a call or safepoint.)
5218 return NULL;
5219 }
5220 rawmem = rawmem->in(0)->as_Initialize()->memory(Compile::AliasIdxRaw);
5221 if (!(rawmem->is_Proj() && rawmem->in(0) == alloc)) {
5222 return NULL;
5223 }
5224
5225 // There must be no unexpected observers of this allocation.
5226 for (DUIterator_Fast imax, i = ptr->fast_outs(imax); i < imax; i++) {
5227 Node* obs = ptr->fast_out(i);
5228 if (obs != this->map()) {
5229 return NULL;
5230 }
5231 }
5232
5233 // This arraycopy must unconditionally follow the allocation of the ptr.
5234 Node* alloc_ctl = ptr->in(0);
5235 assert(just_allocated_object(alloc_ctl) == ptr, "most recent alloc");
5236
5237 Node* ctl = control();
5238 while (ctl != alloc_ctl) {
5239 // There may be guards which feed into the slow_region.
5240 // Any other control flow means that we might not get a chance
5241 // to finish initializing the allocated object.
5242 if ((ctl->is_IfFalse() || ctl->is_IfTrue()) && ctl->in(0)->is_If()) {
5243 IfNode* iff = ctl->in(0)->as_If();
5244 Node* not_ctl = iff->proj_out_or_null(1 - ctl->as_Proj()->_con);
5245 assert(not_ctl != NULL && not_ctl != ctl, "found alternate");
5246 if (slow_region != NULL && slow_region->find_edge(not_ctl) >= 1) {
5247 ctl = iff->in(0); // This test feeds the known slow_region.
5248 continue;
5249 }
5250 // One more try: Various low-level checks bottom out in
5251 // uncommon traps. If the debug-info of the trap omits
5252 // any reference to the allocation, as we've already
5253 // observed, then there can be no objection to the trap.
5254 bool found_trap = false;
5255 for (DUIterator_Fast jmax, j = not_ctl->fast_outs(jmax); j < jmax; j++) {
5256 Node* obs = not_ctl->fast_out(j);
5257 if (obs->in(0) == not_ctl && obs->is_Call() &&
5258 (obs->as_Call()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point())) {
5259 found_trap = true; break;
5260 }
5261 }
5262 if (found_trap) {
5263 ctl = iff->in(0); // This test feeds a harmless uncommon trap.
5264 continue;
5265 }
5266 }
5267 return NULL;
5268 }
5269
5270 // If we get this far, we have an allocation which immediately
5271 // precedes the arraycopy, and we can take over zeroing the new object.
5272 // The arraycopy will finish the initialization, and provide
5273 // a new control state to which we will anchor the destination pointer.
5274
5275 return alloc;
5276 }
5277
5278 //-------------inline_encodeISOArray-----------------------------------
5279 // encode char[] to byte[] in ISO_8859_1
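// Behavioral sketch (see the EncodeISOArray node and the platform stubs for the
// details): the intrinsic encodes characters as long as they fit in a single
// ISO-8859-1 byte (<= 0xFF) and returns the number of characters actually
// encoded, so the Java caller can handle any remaining characters itself.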
5280 bool LibraryCallKit::inline_encodeISOArray() {
5281 assert(callee()->signature()->size() == 5, "encodeISOArray has 5 parameters");
5282 // no receiver since it is static method
5283 Node *src = argument(0);
5284 Node *src_offset = argument(1);
5285 Node *dst = argument(2);
5286 Node *dst_offset = argument(3);
5287 Node *length = argument(4);
5288
5289 src = must_be_not_null(src, true);
5290 dst = must_be_not_null(dst, true);
5291
5292 src = access_resolve(src, ACCESS_READ);
5293 dst = access_resolve(dst, ACCESS_WRITE);
5294
5295 const Type* src_type = src->Value(&_gvn);
5296 const Type* dst_type = dst->Value(&_gvn);
5297 const TypeAryPtr* top_src = src_type->isa_aryptr();
5298 const TypeAryPtr* top_dest = dst_type->isa_aryptr();
5299 if (top_src == NULL || top_src->klass() == NULL ||
5300 top_dest == NULL || top_dest->klass() == NULL) {
5301 // failed array check
5302 return false;
5303 }
5304
5305 // Figure out the size and type of the elements we will be copying.
5306 BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5307 BasicType dst_elem = dst_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5308 if (!((src_elem == T_CHAR) || (src_elem== T_BYTE)) || dst_elem != T_BYTE) {
5309 return false;
5310 }
5311
5312 Node* src_start = array_element_address(src, src_offset, T_CHAR);
5313 Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
5314 // 'src_start' points to src array + scaled offset
5315 // 'dst_start' points to dst array + scaled offset
5316
5317 const TypeAryPtr* mtype = TypeAryPtr::BYTES;
5318 Node* enc = new EncodeISOArrayNode(control(), memory(mtype), src_start, dst_start, length);
5319 enc = _gvn.transform(enc);
5320 Node* res_mem = _gvn.transform(new SCMemProjNode(enc));
5321 set_memory(res_mem, mtype);
5322 set_result(enc);
5323 clear_upper_avx();
5324
5325 return true;
5326 }
5327
5328 //-------------inline_multiplyToLen-----------------------------------
5329 bool LibraryCallKit::inline_multiplyToLen() {
5330 assert(UseMultiplyToLenIntrinsic, "not implemented on this platform");
5331
5332 address stubAddr = StubRoutines::multiplyToLen();
5333 if (stubAddr == NULL) {
5334 return false; // Intrinsic's stub is not implemented on this platform
5335 }
5336 const char* stubName = "multiplyToLen";
5337
5338 assert(callee()->signature()->size() == 5, "multiplyToLen has 5 parameters");
5339
5340 // no receiver because it is a static method
5341 Node* x = argument(0);
5342 Node* xlen = argument(1);
5343 Node* y = argument(2);
5344 Node* ylen = argument(3);
5345 Node* z = argument(4);
5346
5347 x = must_be_not_null(x, true);
5348 y = must_be_not_null(y, true);
5349
5350 x = access_resolve(x, ACCESS_READ);
5351 y = access_resolve(y, ACCESS_READ);
5352 z = access_resolve(z, ACCESS_WRITE);
5353
5354 const Type* x_type = x->Value(&_gvn);
5355 const Type* y_type = y->Value(&_gvn);
5356 const TypeAryPtr* top_x = x_type->isa_aryptr();
5357 const TypeAryPtr* top_y = y_type->isa_aryptr();
5358 if (top_x == NULL || top_x->klass() == NULL ||
5359 top_y == NULL || top_y->klass() == NULL) {
5360 // failed array check
5361 return false;
5362 }
5363
5364 BasicType x_elem = x_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5365 BasicType y_elem = y_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5366 if (x_elem != T_INT || y_elem != T_INT) {
5367 return false;
5368 }
5369
5370 // Set the original stack and the reexecute bit for the interpreter to reexecute
5371 // the bytecode that invokes BigInteger.multiplyToLen() if deoptimization happens
5372 // on the return from z array allocation in runtime.
5373 { PreserveReexecuteState preexecs(this);
5374 jvms()->set_should_reexecute(true);
5375
5376 Node* x_start = array_element_address(x, intcon(0), x_elem);
5377 Node* y_start = array_element_address(y, intcon(0), y_elem);
5378 // 'x_start' points to the first element of the x array
5379 // 'y_start' points to the first element of the y array
5380
5381 // Allocate the result array
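  // A product of an xlen-word magnitude and a ylen-word magnitude fits in at most
  // xlen + ylen 32-bit words, so that is the required length of the result array.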
5382 Node* zlen = _gvn.transform(new AddINode(xlen, ylen));
5383 ciKlass* klass = ciTypeArrayKlass::make(T_INT);
5384 Node* klass_node = makecon(TypeKlassPtr::make(klass));
5385
5386 IdealKit ideal(this);
5387
5388 #define __ ideal.
5389 Node* one = __ ConI(1);
5390 Node* zero = __ ConI(0);
5391 IdealVariable need_alloc(ideal), z_alloc(ideal); __ declarations_done();
5392 __ set(need_alloc, zero);
5393 __ set(z_alloc, z);
5394 __ if_then(z, BoolTest::eq, null()); {
5395 __ increment (need_alloc, one);
5396 } __ else_(); {
5397 // Update graphKit memory and control from IdealKit.
5398 sync_kit(ideal);
5399 Node *cast = new CastPPNode(z, TypePtr::NOTNULL);
5400 cast->init_req(0, control());
5401 _gvn.set_type(cast, cast->bottom_type());
5402 C->record_for_igvn(cast);
5403
5404 Node* zlen_arg = load_array_length(cast);
5405 // Update IdealKit memory and control from graphKit.
5406 __ sync_kit(this);
5407 __ if_then(zlen_arg, BoolTest::lt, zlen); {
5408 __ increment (need_alloc, one);
5409 } __ end_if();
5410 } __ end_if();
5411
5412 __ if_then(__ value(need_alloc), BoolTest::ne, zero); {
5413 // Update graphKit memory and control from IdealKit.
5414 sync_kit(ideal);
5415 Node * narr = new_array(klass_node, zlen, 1);
5416 // Update IdealKit memory and control from graphKit.
5417 __ sync_kit(this);
5418 __ set(z_alloc, narr);
5419 } __ end_if();
5420
5421 sync_kit(ideal);
5422 z = __ value(z_alloc);
5423 // Can't use TypeAryPtr::INTS which uses Bottom offset.
5424 _gvn.set_type(z, TypeOopPtr::make_from_klass(klass));
5425 // Final sync IdealKit and GraphKit.
5426 final_sync(ideal);
5427 #undef __
5428
5429 Node* z_start = array_element_address(z, intcon(0), T_INT);
5430
5431 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
5432 OptoRuntime::multiplyToLen_Type(),
5433 stubAddr, stubName, TypePtr::BOTTOM,
5434 x_start, xlen, y_start, ylen, z_start, zlen);
5435 } // original reexecute is set back here
5436
5437 C->set_has_split_ifs(true); // Has chance for split-if optimization
5438 set_result(z);
5439 return true;
5440 }
5441
5442 //-------------inline_squareToLen------------------------------------
5443 bool LibraryCallKit::inline_squareToLen() {
5444 assert(UseSquareToLenIntrinsic, "not implemented on this platform");
5445
5446 address stubAddr = StubRoutines::squareToLen();
5447 if (stubAddr == NULL) {
5448 return false; // Intrinsic's stub is not implemented on this platform
5449 }
5450 const char* stubName = "squareToLen";
5451
5452 assert(callee()->signature()->size() == 4, "implSquareToLen has 4 parameters");
5453
5454 Node* x = argument(0);
5455 Node* len = argument(1);
5456 Node* z = argument(2);
5457 Node* zlen = argument(3);
5458
5459 x = must_be_not_null(x, true);
5460 z = must_be_not_null(z, true);
5461
5462 x = access_resolve(x, ACCESS_READ);
5463 z = access_resolve(z, ACCESS_WRITE);
5464
5465 const Type* x_type = x->Value(&_gvn);
5466 const Type* z_type = z->Value(&_gvn);
5467 const TypeAryPtr* top_x = x_type->isa_aryptr();
5468 const TypeAryPtr* top_z = z_type->isa_aryptr();
5469 if (top_x == NULL || top_x->klass() == NULL ||
5470 top_z == NULL || top_z->klass() == NULL) {
5471 // failed array check
5472 return false;
5473 }
5474
5475 BasicType x_elem = x_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5476 BasicType z_elem = z_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5477 if (x_elem != T_INT || z_elem != T_INT) {
5478 return false;
5479 }
5480
5481
5482 Node* x_start = array_element_address(x, intcon(0), x_elem);
5483 Node* z_start = array_element_address(z, intcon(0), z_elem);
5484
5485 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
5486 OptoRuntime::squareToLen_Type(),
5487 stubAddr, stubName, TypePtr::BOTTOM,
5488 x_start, len, z_start, zlen);
5489
5490 set_result(z);
5491 return true;
5492 }
5493
5494 //-------------inline_mulAdd------------------------------------------
5495 bool LibraryCallKit::inline_mulAdd() {
5496 assert(UseMulAddIntrinsic, "not implemented on this platform");
5497
5498 address stubAddr = StubRoutines::mulAdd();
5499 if (stubAddr == NULL) {
5500 return false; // Intrinsic's stub is not implemented on this platform
5501 }
5502 const char* stubName = "mulAdd";
5503
5504 assert(callee()->signature()->size() == 5, "mulAdd has 5 parameters");
5505
5506 Node* out = argument(0);
5507 Node* in = argument(1);
5508 Node* offset = argument(2);
5509 Node* len = argument(3);
5510 Node* k = argument(4);
5511
5512 out = must_be_not_null(out, true);
5513
5514 in = access_resolve(in, ACCESS_READ);
5515 out = access_resolve(out, ACCESS_WRITE);
5516
5517 const Type* out_type = out->Value(&_gvn);
5518 const Type* in_type = in->Value(&_gvn);
5519 const TypeAryPtr* top_out = out_type->isa_aryptr();
5520 const TypeAryPtr* top_in = in_type->isa_aryptr();
5521 if (top_out == NULL || top_out->klass() == NULL ||
5522 top_in == NULL || top_in->klass() == NULL) {
5523 // failed array check
5524 return false;
5525 }
5526
5527 BasicType out_elem = out_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5528 BasicType in_elem = in_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5529 if (out_elem != T_INT || in_elem != T_INT) {
5530 return false;
5531 }
5532
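  // The Java-level mulAdd offset is measured from the end of 'out'; convert it to the
  // start-relative offset that the stub expects.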
5533 Node* outlen = load_array_length(out);
5534 Node* new_offset = _gvn.transform(new SubINode(outlen, offset));
5535 Node* out_start = array_element_address(out, intcon(0), out_elem);
5536 Node* in_start = array_element_address(in, intcon(0), in_elem);
5537
5538 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
5539 OptoRuntime::mulAdd_Type(),
5540 stubAddr, stubName, TypePtr::BOTTOM,
5541 out_start, in_start, new_offset, len, k);
5542 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5543 set_result(result);
5544 return true;
5545 }
5546
5547 //-------------inline_montgomeryMultiply-----------------------------------
5548 bool LibraryCallKit::inline_montgomeryMultiply() {
5549 address stubAddr = StubRoutines::montgomeryMultiply();
5550 if (stubAddr == NULL) {
5551 return false; // Intrinsic's stub is not implemented on this platform
5552 }
5553
5554 assert(UseMontgomeryMultiplyIntrinsic, "not implemented on this platform");
5555 const char* stubName = "montgomery_multiply";
5556
5557 assert(callee()->signature()->size() == 7, "montgomeryMultiply has 7 parameters");
5558
5559 Node* a = argument(0);
5560 Node* b = argument(1);
5561 Node* n = argument(2);
5562 Node* len = argument(3);
5563 Node* inv = argument(4);
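  // argument(5) is the unused high half of the long 'inv'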
5564 Node* m = argument(6);
5565
5566 a = access_resolve(a, ACCESS_READ);
5567 b = access_resolve(b, ACCESS_READ);
5568 n = access_resolve(n, ACCESS_READ);
5569 m = access_resolve(m, ACCESS_WRITE);
5570
5571 const Type* a_type = a->Value(&_gvn);
5572 const TypeAryPtr* top_a = a_type->isa_aryptr();
5573 const Type* b_type = b->Value(&_gvn);
5574 const TypeAryPtr* top_b = b_type->isa_aryptr();
5575 const Type* n_type = n->Value(&_gvn);
5576 const TypeAryPtr* top_n = n_type->isa_aryptr();
5577 const Type* m_type = m->Value(&_gvn);
5578 const TypeAryPtr* top_m = m_type->isa_aryptr();
5579 if (top_a == NULL || top_a->klass() == NULL ||
5580 top_b == NULL || top_b->klass() == NULL ||
5581 top_n == NULL || top_n->klass() == NULL ||
5582 top_m == NULL || top_m->klass() == NULL) {
5583 // failed array check
5584 return false;
5585 }
5586
5587 BasicType a_elem = a_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5588 BasicType b_elem = b_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5589 BasicType n_elem = n_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5590 BasicType m_elem = m_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5591 if (a_elem != T_INT || b_elem != T_INT || n_elem != T_INT || m_elem != T_INT) {
5592 return false;
5593 }
5594
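  // Montgomery multiplication computes a * b * R^-1 mod n (with R = 2^(32*len)); 'inv' is the
  // precomputed inverse constant used by the reduction step, and the result is written into 'm'.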
5595 // Make the call
5596 {
5597 Node* a_start = array_element_address(a, intcon(0), a_elem);
5598 Node* b_start = array_element_address(b, intcon(0), b_elem);
5599 Node* n_start = array_element_address(n, intcon(0), n_elem);
5600 Node* m_start = array_element_address(m, intcon(0), m_elem);
5601
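    // 'inv' is a jlong, so it occupies two parameter slots in the call; top() fills the unused half.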
5602 Node* call = make_runtime_call(RC_LEAF,
5603 OptoRuntime::montgomeryMultiply_Type(),
5604 stubAddr, stubName, TypePtr::BOTTOM,
5605 a_start, b_start, n_start, len, inv, top(),
5606 m_start);
5607 set_result(m);
5608 }
5609
5610 return true;
5611 }
5612
5613 bool LibraryCallKit::inline_montgomerySquare() {
5614 address stubAddr = StubRoutines::montgomerySquare();
5615 if (stubAddr == NULL) {
5616 return false; // Intrinsic's stub is not implemented on this platform
5617 }
5618
5619 assert(UseMontgomerySquareIntrinsic, "not implemented on this platform");
5620 const char* stubName = "montgomery_square";
5621
5622 assert(callee()->signature()->size() == 6, "montgomerySquare has 6 parameters");
5623
5624 Node* a = argument(0);
5625 Node* n = argument(1);
5626 Node* len = argument(2);
5627 Node* inv = argument(3);
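  // argument(4) is the unused high half of the long 'inv'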
5628 Node* m = argument(5);
5629
5630 a = access_resolve(a, ACCESS_READ);
5631 n = access_resolve(n, ACCESS_READ);
5632 m = access_resolve(m, ACCESS_WRITE);
5633
5634 const Type* a_type = a->Value(&_gvn);
5635 const TypeAryPtr* top_a = a_type->isa_aryptr();
5636 const Type* n_type = n->Value(&_gvn);
5637 const TypeAryPtr* top_n = n_type->isa_aryptr();
5638 const Type* m_type = m->Value(&_gvn);
5639 const TypeAryPtr* top_m = m_type->isa_aryptr();
5640 if (top_a == NULL || top_a->klass() == NULL ||
5641 top_n == NULL || top_n->klass() == NULL ||
5642 top_m == NULL || top_m->klass() == NULL) {
5643 // failed array check
5644 return false;
5645 }
5646
5647 BasicType a_elem = a_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5648 BasicType n_elem = n_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5649 BasicType m_elem = m_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5650 if (a_elem != T_INT || n_elem != T_INT || m_elem != T_INT) {
5651 return false;
5652 }
5653
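  // Montgomery squaring is montgomeryMultiply with a == b: it computes a^2 * R^-1 mod n into 'm'.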
5654 // Make the call
5655 {
5656 Node* a_start = array_element_address(a, intcon(0), a_elem);
5657 Node* n_start = array_element_address(n, intcon(0), n_elem);
5658 Node* m_start = array_element_address(m, intcon(0), m_elem);
5659
5660 Node* call = make_runtime_call(RC_LEAF,
5661 OptoRuntime::montgomerySquare_Type(),
5662 stubAddr, stubName, TypePtr::BOTTOM,
5663 a_start, n_start, len, inv, top(),
5664 m_start);
5665 set_result(m);
5666 }
5667
5668 return true;
5669 }
5670
5671 //-------------inline_vectorizedMismatch------------------------------
5672 bool LibraryCallKit::inline_vectorizedMismatch() {
5673 assert(UseVectorizedMismatchIntrinsic, "not implemented on this platform");
5674
5675 address stubAddr = StubRoutines::vectorizedMismatch();
5676 if (stubAddr == NULL) {
5677 return false; // Intrinsic's stub is not implemented on this platform
5678 }
5679 const char* stubName = "vectorizedMismatch";
5680 int size_l = callee()->signature()->size();
5681 assert(size_l == 8, "vectorizedMismatch has 6 parameters, two of which are long");
5682
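  // Longs occupy two argument slots, so the six parameters land in slots 0, 1, 3, 4, 6 and 7.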
5683 Node* obja = argument(0);
5684 Node* aoffset = argument(1);
5685 Node* objb = argument(3);
5686 Node* boffset = argument(4);
5687 Node* length = argument(6);
5688 Node* scale = argument(7);
5689
5690 const Type* a_type = obja->Value(&_gvn);
5691 const Type* b_type = objb->Value(&_gvn);
5692 const TypeAryPtr* top_a = a_type->isa_aryptr();
5693 const TypeAryPtr* top_b = b_type->isa_aryptr();
5694 if (top_a == NULL || top_a->klass() == NULL ||
5695 top_b == NULL || top_b->klass() == NULL) {
5696 // failed array check
5697 return false;
5698 }
5699
5700 Node* call;
5701 jvms()->set_should_reexecute(true);
5702
5703 obja = access_resolve(obja, ACCESS_READ);
5704 objb = access_resolve(objb, ACCESS_READ);
5705 Node* obja_adr = make_unsafe_address(obja, aoffset, ACCESS_READ);
5706 Node* objb_adr = make_unsafe_address(objb, boffset, ACCESS_READ);
5707
5708 call = make_runtime_call(RC_LEAF,
5709 OptoRuntime::vectorizedMismatch_Type(),
5710 stubAddr, stubName, TypePtr::BOTTOM,
5711 obja_adr, objb_adr, length, scale);
5712
5713 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5714 set_result(result);
5715 return true;
5716 }
5717
5718 /**
5719 * Calculate CRC32 for byte.
5720 * int java.util.zip.CRC32.update(int crc, int b)
5721 */
5722 bool LibraryCallKit::inline_updateCRC32() {
5723 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
5724 assert(callee()->signature()->size() == 2, "update has 2 parameters");
5725 // no receiver since it is static method
5726 Node* crc = argument(0); // type: int
5727 Node* b = argument(1); // type: int
5728
5729 /*
5730 * int c = ~ crc;
5731 * b = timesXtoThe32[(b ^ c) & 0xFF];
5732 * b = b ^ (c >>> 8);
5733 * crc = ~b;
5734 */
5735
5736 Node* M1 = intcon(-1);
5737 crc = _gvn.transform(new XorINode(crc, M1));
5738 Node* result = _gvn.transform(new XorINode(crc, b));
5739 result = _gvn.transform(new AndINode(result, intcon(0xFF)));
5740
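  // Index into the 256-entry CRC table; each entry is a 4-byte int, hence the scaling shift by 2.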
5741 Node* base = makecon(TypeRawPtr::make(StubRoutines::crc_table_addr()));
5742 Node* offset = _gvn.transform(new LShiftINode(result, intcon(0x2)));
5743 Node* adr = basic_plus_adr(top(), base, ConvI2X(offset));
5744 result = make_load(control(), adr, TypeInt::INT, T_INT, MemNode::unordered);
5745
5746 crc = _gvn.transform(new URShiftINode(crc, intcon(8)));
5747 result = _gvn.transform(new XorINode(crc, result));
5748 result = _gvn.transform(new XorINode(result, M1));
5749 set_result(result);
5750 return true;
5751 }
5752
5753 /**
5754 * Calculate CRC32 for byte[] array.
5755 * int java.util.zip.CRC32.updateBytes(int crc, byte[] buf, int off, int len)
5756 */
5757 bool LibraryCallKit::inline_updateBytesCRC32() {
5758 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
5759 assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
5760 // no receiver since it is static method
5761 Node* crc = argument(0); // type: int
5762 Node* src = argument(1); // type: oop
5763 Node* offset = argument(2); // type: int
5764 Node* length = argument(3); // type: int
5765
5766 const Type* src_type = src->Value(&_gvn);
5767 const TypeAryPtr* top_src = src_type->isa_aryptr();
5768 if (top_src == NULL || top_src->klass() == NULL) {
5769 // failed array check
5770 return false;
5771 }
5772
5773 // Figure out the size and type of the elements we will be copying.
5774 BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5775 if (src_elem != T_BYTE) {
5776 return false;
5777 }
5778
5779 // 'src_start' points to src array + scaled offset
5780 src = must_be_not_null(src, true);
5781 src = access_resolve(src, ACCESS_READ);
5782 Node* src_start = array_element_address(src, offset, src_elem);
5783
5784 // We assume that range check is done by caller.
5785 // TODO: generate range check (offset+length < src.length) in debug VM.
5786
5787 // Call the stub.
5788 address stubAddr = StubRoutines::updateBytesCRC32();
5789 const char *stubName = "updateBytesCRC32";
5790
5791 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
5792 stubAddr, stubName, TypePtr::BOTTOM,
5793 crc, src_start, length);
5794 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5795 set_result(result);
5796 return true;
5797 }
5798
5799 /**
5800 * Calculate CRC32 for ByteBuffer.
5801 * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
5802 */
5803 bool LibraryCallKit::inline_updateByteBufferCRC32() {
5804 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions support");
5805 assert(callee()->signature()->size() == 5, "updateByteBuffer has 4 parameters and one is long");
5806 // no receiver since it is static method
5807 Node* crc = argument(0); // type: int
5808 Node* src = argument(1); // type: long
5809 Node* offset = argument(3); // type: int
5810 Node* length = argument(4); // type: int
5811
5812 src = ConvL2X(src); // adjust Java long to machine word
5813 Node* base = _gvn.transform(new CastX2PNode(src));
5814 offset = ConvI2X(offset);
5815
5816 // 'src_start' points to src array + scaled offset
5817 Node* src_start = basic_plus_adr(top(), base, offset);
5818
5819 // Call the stub.
5820 address stubAddr = StubRoutines::updateBytesCRC32();
5821 const char *stubName = "updateBytesCRC32";
5822
5823 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
5824 stubAddr, stubName, TypePtr::BOTTOM,
5825 crc, src_start, length);
5826 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5827 set_result(result);
5828 return true;
5829 }
5830
5831 //------------------------------get_table_from_crc32c_class-----------------------
5832 Node * LibraryCallKit::get_table_from_crc32c_class(ciInstanceKlass *crc32c_class) {
5833 Node* table = load_field_from_object(NULL, "byteTable", "[I", /*is_exact*/ false, /*is_static*/ true, crc32c_class);
5834 assert (table != NULL, "wrong version of java.util.zip.CRC32C");
5835
5836 return table;
5837 }
5838
5839 //------------------------------inline_updateBytesCRC32C-----------------------
5840 //
5841 // Calculate CRC32C for byte[] array.
5842 // int java.util.zip.CRC32C.updateBytes(int crc, byte[] buf, int off, int end)
5843 //
5844 bool LibraryCallKit::inline_updateBytesCRC32C() {
5845 assert(UseCRC32CIntrinsics, "need CRC32C instruction support");
5846 assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
5847 assert(callee()->holder()->is_loaded(), "CRC32C class must be loaded");
5848 // no receiver since it is a static method
5849 Node* crc = argument(0); // type: int
5850 Node* src = argument(1); // type: oop
5851 Node* offset = argument(2); // type: int
5852 Node* end = argument(3); // type: int
5853
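  // updateBytes is passed an end index rather than a length, so the length is end - offset.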
5854 Node* length = _gvn.transform(new SubINode(end, offset));
5855
5856 const Type* src_type = src->Value(&_gvn);
5857 const TypeAryPtr* top_src = src_type->isa_aryptr();
5858 if (top_src == NULL || top_src->klass() == NULL) {
5859 // failed array check
5860 return false;
5861 }
5862
5863 // Figure out the size and type of the elements we will be copying.
5864 BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5865 if (src_elem != T_BYTE) {
5866 return false;
5867 }
5868
5869 // 'src_start' points to src array + scaled offset
5870 src = must_be_not_null(src, true);
5871 src = access_resolve(src, ACCESS_READ);
5872 Node* src_start = array_element_address(src, offset, src_elem);
5873
5874 // static final int[] byteTable in class CRC32C
5875 Node* table = get_table_from_crc32c_class(callee()->holder());
5876 table = must_be_not_null(table, true);
5877 table = access_resolve(table, ACCESS_READ);
5878 Node* table_start = array_element_address(table, intcon(0), T_INT);
5879
5880 // We assume that range check is done by caller.
5881 // TODO: generate range check (offset+length < src.length) in debug VM.
5882
5883 // Call the stub.
5884 address stubAddr = StubRoutines::updateBytesCRC32C();
5885 const char *stubName = "updateBytesCRC32C";
5886
5887 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesCRC32C_Type(),
5888 stubAddr, stubName, TypePtr::BOTTOM,
5889 crc, src_start, length, table_start);
5890 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5891 set_result(result);
5892 return true;
5893 }
5894
5895 //------------------------------inline_updateDirectByteBufferCRC32C-----------------------
5896 //
5897 // Calculate CRC32C for DirectByteBuffer.
5898 // int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end)
5899 //
5900 bool LibraryCallKit::inline_updateDirectByteBufferCRC32C() {
5901 assert(UseCRC32CIntrinsics, "need CRC32C instruction support");
5902 assert(callee()->signature()->size() == 5, "updateDirectByteBuffer has 4 parameters and one is long");
5903 assert(callee()->holder()->is_loaded(), "CRC32C class must be loaded");
5904 // no receiver since it is a static method
5905 Node* crc = argument(0); // type: int
5906 Node* src = argument(1); // type: long
5907 Node* offset = argument(3); // type: int
5908 Node* end = argument(4); // type: int
5909
5910 Node* length = _gvn.transform(new SubINode(end, offset));
5911
5912 src = ConvL2X(src); // adjust Java long to machine word
5913 Node* base = _gvn.transform(new CastX2PNode(src));
5914 offset = ConvI2X(offset);
5915
5916 // 'src_start' points to src array + scaled offset
5917 Node* src_start = basic_plus_adr(top(), base, offset);
5918
5919 // static final int[] byteTable in class CRC32C
5920 Node* table = get_table_from_crc32c_class(callee()->holder());
5921 table = must_be_not_null(table, true);
5922 table = access_resolve(table, ACCESS_READ);
5923 Node* table_start = array_element_address(table, intcon(0), T_INT);
5924
5925 // Call the stub.
5926 address stubAddr = StubRoutines::updateBytesCRC32C();
5927 const char *stubName = "updateBytesCRC32C";
5928
5929 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesCRC32C_Type(),
5930 stubAddr, stubName, TypePtr::BOTTOM,
5931 crc, src_start, length, table_start);
5932 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5933 set_result(result);
5934 return true;
5935 }
5936
5937 //------------------------------inline_updateBytesAdler32----------------------
5938 //
5939 // Calculate Adler32 checksum for byte[] array.
5940 // int java.util.zip.Adler32.updateBytes(int crc, byte[] buf, int off, int len)
5941 //
5942 bool LibraryCallKit::inline_updateBytesAdler32() {
5943 assert(UseAdler32Intrinsics, "need Adler32 intrinsic support"); // check if we actually need to check this flag or check a different one
5944 assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
5945 assert(callee()->holder()->is_loaded(), "Adler32 class must be loaded");
5946 // no receiver since it is static method
5947 Node* crc = argument(0); // type: int
5948 Node* src = argument(1); // type: oop
5949 Node* offset = argument(2); // type: int
5950 Node* length = argument(3); // type: int
5951
5952 const Type* src_type = src->Value(&_gvn);
5953 const TypeAryPtr* top_src = src_type->isa_aryptr();
5954 if (top_src == NULL || top_src->klass() == NULL) {
5955 // failed array check
5956 return false;
5957 }
5958
5959 // Figure out the size and type of the elements we will be copying.
5960 BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5961 if (src_elem != T_BYTE) {
5962 return false;
5963 }
5964
5965 // 'src_start' points to src array + scaled offset
5966 src = access_resolve(src, ACCESS_READ);
5967 Node* src_start = array_element_address(src, offset, src_elem);
5968
5969 // We assume that range check is done by caller.
5970 // TODO: generate range check (offset+length < src.length) in debug VM.
5971
5972 // Call the stub.
5973 address stubAddr = StubRoutines::updateBytesAdler32();
5974 const char *stubName = "updateBytesAdler32";
5975
5976 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesAdler32_Type(),
5977 stubAddr, stubName, TypePtr::BOTTOM,
5978 crc, src_start, length);
5979 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
5980 set_result(result);
5981 return true;
5982 }
5983
5984 //------------------------------inline_updateByteBufferAdler32---------------
5985 //
5986 // Calculate Adler32 checksum for DirectByteBuffer.
5987 // int java.util.zip.Adler32.updateByteBuffer(int crc, long buf, int off, int len)
5988 //
5989 bool LibraryCallKit::inline_updateByteBufferAdler32() {
5990 assert(UseAdler32Intrinsics, "need Adler32 intrinsic support"); // check if we actually need to check this flag or check a different one
5991 assert(callee()->signature()->size() == 5, "updateByteBuffer has 4 parameters and one is long");
5992 assert(callee()->holder()->is_loaded(), "Adler32 class must be loaded");
5993 // no receiver since it is static method
5994 Node* crc = argument(0); // type: int
5995 Node* src = argument(1); // type: long
5996 Node* offset = argument(3); // type: int
5997 Node* length = argument(4); // type: int
5998
5999 src = ConvL2X(src); // adjust Java long to machine word
6000 Node* base = _gvn.transform(new CastX2PNode(src));
6001 offset = ConvI2X(offset);
6002
6003 // 'src_start' points to src array + scaled offset
6004 Node* src_start = basic_plus_adr(top(), base, offset);
6005
6006 // Call the stub.
6007 address stubAddr = StubRoutines::updateBytesAdler32();
6008 const char *stubName = "updateBytesAdler32";
6009
6010 Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesAdler32_Type(),
6011 stubAddr, stubName, TypePtr::BOTTOM,
6012 crc, src_start, length);
6013
6014 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
6015 set_result(result);
6016 return true;
6017 }
6018
6019 //----------------------------inline_reference_get----------------------------
6020 // public T java.lang.ref.Reference.get();
6021 bool LibraryCallKit::inline_reference_get() {
6022 const int referent_offset = java_lang_ref_Reference::referent_offset;
6023 guarantee(referent_offset > 0, "should have already been set");
6024
6025 // Get the argument:
6026 Node* reference_obj = null_check_receiver();
6027 if (stopped()) return true;
6028
6029 const TypeInstPtr* tinst = _gvn.type(reference_obj)->isa_instptr();
6030 assert(tinst != NULL, "obj is null");
6031 assert(tinst->klass()->is_loaded(), "obj is not loaded");
6032 ciInstanceKlass* referenceKlass = tinst->klass()->as_instance_klass();
6033 ciField* field = referenceKlass->get_field_by_name(ciSymbol::make("referent"),
6034 ciSymbol::make("Ljava/lang/Object;"),
6035 false);
6036 assert (field != NULL, "undefined field");
6037
6038 Node* adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);
6039 const TypePtr* adr_type = C->alias_type(field)->adr_type();
6040
6041 ciInstanceKlass* klass = env()->Object_klass();
6042 const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
6043
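  // ON_WEAK_OOP_REF marks this as a load of a weak reference's referent so the GC barrier can
  // apply the required special handling (e.g. keeping the referent alive during concurrent marking).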
6044 DecoratorSet decorators = IN_HEAP | ON_WEAK_OOP_REF;
6045 Node* result = access_load_at(reference_obj, adr, adr_type, object_type, T_OBJECT, decorators);
6046 // Add memory barrier to prevent commoning reads from this field
6047 // across safepoint since GC can change its value.
6048 insert_mem_bar(Op_MemBarCPUOrder);
6049
6050 set_result(result);
6051 return true;
6052 }
6053
6054
6055 Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
6056 bool is_exact=true, bool is_static=false,
6057 ciInstanceKlass * fromKls=NULL) {
6058 if (fromKls == NULL) {
6059 const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
6060 assert(tinst != NULL, "obj is null");
6061 assert(tinst->klass()->is_loaded(), "obj is not loaded");
6062 assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
6063 fromKls = tinst->klass()->as_instance_klass();
6064 } else {
6065 assert(is_static, "only for static field access");
6066 }
6067 ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
6068 ciSymbol::make(fieldTypeString),
6069 is_static);
6070
6071 assert (field != NULL, "undefined field");
6072 if (field == NULL) return (Node *) NULL;
6073
6074 if (is_static) {
6075 const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
6076 fromObj = makecon(tip);
6077 }
6078
6079 // Next code copied from Parse::do_get_xxx():
6080
6081 // Compute address and memory type.
6082 int offset = field->offset_in_bytes();
6083 bool is_vol = field->is_volatile();
6084 ciType* field_klass = field->type();
6085 assert(field_klass->is_loaded(), "should be loaded");
6086 const TypePtr* adr_type = C->alias_type(field)->adr_type();
6087 Node *adr = basic_plus_adr(fromObj, fromObj, offset);
6088 BasicType bt = field->layout_type();
6089
6090 // Build the resultant type of the load
6091 const Type *type;
6092 if (bt == T_OBJECT) {
6093 type = TypeOopPtr::make_from_klass(field_klass->as_klass());
6094 } else {
6095 type = Type::get_const_basic_type(bt);
6096 }
6097
6098 DecoratorSet decorators = IN_HEAP;
6099
6100 if (is_vol) {
6101 decorators |= MO_SEQ_CST;
6102 }
6103
6104 return access_load_at(fromObj, adr, adr_type, type, bt, decorators);
6105 }
6106
6107 Node * LibraryCallKit::field_address_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
6108 bool is_exact = true, bool is_static = false,
6109 ciInstanceKlass * fromKls = NULL) {
6110 if (fromKls == NULL) {
6111 const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
6112 assert(tinst != NULL, "obj is null");
6113 assert(tinst->klass()->is_loaded(), "obj is not loaded");
6114 assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
6115 fromKls = tinst->klass()->as_instance_klass();
6116 }
6117 else {
6118 assert(is_static, "only for static field access");
6119 }
6120 ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
6121 ciSymbol::make(fieldTypeString),
6122 is_static);
6123
6124 assert(field != NULL, "undefined field");
6125 assert(!field->is_volatile(), "not defined for volatile fields");
6126
6127 if (is_static) {
6128 const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
6129 fromObj = makecon(tip);
6130 }
6131
6132 // Next code copied from Parse::do_get_xxx():
6133
6134 // Compute address and memory type.
6135 int offset = field->offset_in_bytes();
6136 Node *adr = basic_plus_adr(fromObj, fromObj, offset);
6137
6138 return adr;
6139 }
6140
6141 //------------------------------inline_aescrypt_Block-----------------------
6142 bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
6143 address stubAddr = NULL;
6144 const char *stubName;
6145 assert(UseAES, "need AES instruction support");
6146
6147 switch(id) {
6148 case vmIntrinsics::_aescrypt_encryptBlock:
6149 stubAddr = StubRoutines::aescrypt_encryptBlock();
6150 stubName = "aescrypt_encryptBlock";
6151 break;
6152 case vmIntrinsics::_aescrypt_decryptBlock:
6153 stubAddr = StubRoutines::aescrypt_decryptBlock();
6154 stubName = "aescrypt_decryptBlock";
6155 break;
6156 default:
6157 break;
6158 }
6159 if (stubAddr == NULL) return false;
6160
6161 Node* aescrypt_object = argument(0);
6162 Node* src = argument(1);
6163 Node* src_offset = argument(2);
6164 Node* dest = argument(3);
6165 Node* dest_offset = argument(4);
6166
6167 src = must_be_not_null(src, true);
6168 dest = must_be_not_null(dest, true);
6169
6170 src = access_resolve(src, ACCESS_READ);
6171 dest = access_resolve(dest, ACCESS_WRITE);
6172
6173 // (1) src and dest are arrays.
6174 const Type* src_type = src->Value(&_gvn);
6175 const Type* dest_type = dest->Value(&_gvn);
6176 const TypeAryPtr* top_src = src_type->isa_aryptr();
6177 const TypeAryPtr* top_dest = dest_type->isa_aryptr();
6178 assert (top_src != NULL && top_src->klass() != NULL && top_dest != NULL && top_dest->klass() != NULL, "args are strange");
6179
6180 // for the quick and dirty code we will skip all the checks.
6181 // we are just trying to get the call to be generated.
6182 Node* src_start = src;
6183 Node* dest_start = dest;
6184 if (src_offset != NULL || dest_offset != NULL) {
6185 assert(src_offset != NULL && dest_offset != NULL, "");
6186 src_start = array_element_address(src, src_offset, T_BYTE);
6187 dest_start = array_element_address(dest, dest_offset, T_BYTE);
6188 }
6189
6190 // now need to get the start of its expanded key array
6191 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
6192 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
6193 if (k_start == NULL) return false;
6194
6195 if (Matcher::pass_original_key_for_aes()) {
6196 // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
6197 // compatibility issues between Java key expansion and SPARC crypto instructions
6198 Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
6199 if (original_k_start == NULL) return false;
6200
6201 // Call the stub.
6202 make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
6203 stubAddr, stubName, TypePtr::BOTTOM,
6204 src_start, dest_start, k_start, original_k_start);
6205 } else {
6206 // Call the stub.
6207 make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
6208 stubAddr, stubName, TypePtr::BOTTOM,
6209 src_start, dest_start, k_start);
6210 }
6211
6212 return true;
6213 }
6214
6215 //------------------------------inline_cipherBlockChaining_AESCrypt-----------------------
6216 bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) {
6217 address stubAddr = NULL;
6218 const char *stubName = NULL;
6219
6220 assert(UseAES, "need AES instruction support");
6221
6222 switch(id) {
6223 case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
6224 stubAddr = StubRoutines::cipherBlockChaining_encryptAESCrypt();
6225 stubName = "cipherBlockChaining_encryptAESCrypt";
6226 break;
6227 case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
6228 stubAddr = StubRoutines::cipherBlockChaining_decryptAESCrypt();
6229 stubName = "cipherBlockChaining_decryptAESCrypt";
6230 break;
6231 default:
6232 break;
6233 }
6234 if (stubAddr == NULL) return false;
6235
6236 Node* cipherBlockChaining_object = argument(0);
6237 Node* src = argument(1);
6238 Node* src_offset = argument(2);
6239 Node* len = argument(3);
6240 Node* dest = argument(4);
6241 Node* dest_offset = argument(5);
6242
6243 src = must_be_not_null(src, false);
6244 dest = must_be_not_null(dest, false);
6245
6246 src = access_resolve(src, ACCESS_READ);
6247 dest = access_resolve(dest, ACCESS_WRITE);
6248
6249 // (1) src and dest are arrays.
6250 const Type* src_type = src->Value(&_gvn);
6251 const Type* dest_type = dest->Value(&_gvn);
6252 const TypeAryPtr* top_src = src_type->isa_aryptr();
6253 const TypeAryPtr* top_dest = dest_type->isa_aryptr();
6254 assert (top_src != NULL && top_src->klass() != NULL
6255 && top_dest != NULL && top_dest->klass() != NULL, "args are strange");
6256
6257 // checks are the responsibility of the caller
6258 Node* src_start = src;
6259 Node* dest_start = dest;
6260 if (src_offset != NULL || dest_offset != NULL) {
6261 assert(src_offset != NULL && dest_offset != NULL, "");
6262 src_start = array_element_address(src, src_offset, T_BYTE);
6263 dest_start = array_element_address(dest, dest_offset, T_BYTE);
6264 }
6265
6266 // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
6267 // (because of the predicated logic executed earlier).
6268 // so we cast it here safely.
6269 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
6270
6271 Node* embeddedCipherObj = load_field_from_object(cipherBlockChaining_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
6272 if (embeddedCipherObj == NULL) return false;
6273
6274 // cast it to what we know it will be at runtime
6275 const TypeInstPtr* tinst = _gvn.type(cipherBlockChaining_object)->isa_instptr();
6276 assert(tinst != NULL, "CBC obj is null");
6277 assert(tinst->klass()->is_loaded(), "CBC obj is not loaded");
6278 ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
6279 assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
6280
6281 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
6282 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
6283 const TypeOopPtr* xtype = aklass->as_instance_type();
6284 Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
6285 aescrypt_object = _gvn.transform(aescrypt_object);
6286
6287 // we need to get the start of the aescrypt_object's expanded key array
6288 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
6289 if (k_start == NULL) return false;
6290
6291 // similarly, get the start address of the r vector
6292 Node* objRvec = load_field_from_object(cipherBlockChaining_object, "r", "[B", /*is_exact*/ false);
6293 if (objRvec == NULL) return false;
6294 objRvec = access_resolve(objRvec, ACCESS_WRITE);
6295 Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE);
6296
6297 Node* cbcCrypt;
6298 if (Matcher::pass_original_key_for_aes()) {
6299 // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
6300 // compatibility issues between Java key expansion and SPARC crypto instructions
6301 Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
6302 if (original_k_start == NULL) return false;
6303
6304 // Call the stub, passing src_start, dest_start, k_start, r_start, src_len and original_k_start
6305 cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
6306 OptoRuntime::cipherBlockChaining_aescrypt_Type(),
6307 stubAddr, stubName, TypePtr::BOTTOM,
6308 src_start, dest_start, k_start, r_start, len, original_k_start);
6309 } else {
6310 // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
6311 cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
6312 OptoRuntime::cipherBlockChaining_aescrypt_Type(),
6313 stubAddr, stubName, TypePtr::BOTTOM,
6314 src_start, dest_start, k_start, r_start, len);
6315 }
6316
6317 // return cipher length (int)
6318 Node* retvalue = _gvn.transform(new ProjNode(cbcCrypt, TypeFunc::Parms));
6319 set_result(retvalue);
6320 return true;
6321 }
6322
6323 //------------------------------inline_counterMode_AESCrypt-----------------------
6324 bool LibraryCallKit::inline_counterMode_AESCrypt(vmIntrinsics::ID id) {
6325 assert(UseAES, "need AES instruction support");
6326 if (!UseAESCTRIntrinsics) return false;
6327
6328 address stubAddr = NULL;
6329 const char *stubName = NULL;
6330 if (id == vmIntrinsics::_counterMode_AESCrypt) {
6331 stubAddr = StubRoutines::counterMode_AESCrypt();
6332 stubName = "counterMode_AESCrypt";
6333 }
6334 if (stubAddr == NULL) return false;
6335
6336 Node* counterMode_object = argument(0);
6337 Node* src = argument(1);
6338 Node* src_offset = argument(2);
6339 Node* len = argument(3);
6340 Node* dest = argument(4);
6341 Node* dest_offset = argument(5);
6342
6343 src = access_resolve(src, ACCESS_READ);
6344 dest = access_resolve(dest, ACCESS_WRITE);
6345 counterMode_object = access_resolve(counterMode_object, ACCESS_WRITE);
6346
6347 // (1) src and dest are arrays.
6348 const Type* src_type = src->Value(&_gvn);
6349 const Type* dest_type = dest->Value(&_gvn);
6350 const TypeAryPtr* top_src = src_type->isa_aryptr();
6351 const TypeAryPtr* top_dest = dest_type->isa_aryptr();
6352 assert(top_src != NULL && top_src->klass() != NULL &&
6353 top_dest != NULL && top_dest->klass() != NULL, "args are strange");
6354
6355 // checks are the responsibility of the caller
6356 Node* src_start = src;
6357 Node* dest_start = dest;
6358 if (src_offset != NULL || dest_offset != NULL) {
6359 assert(src_offset != NULL && dest_offset != NULL, "");
6360 src_start = array_element_address(src, src_offset, T_BYTE);
6361 dest_start = array_element_address(dest, dest_offset, T_BYTE);
6362 }
6363
6364 // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
6365 // (because of the predicated logic executed earlier).
6366 // so we cast it here safely.
6367 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
6368 Node* embeddedCipherObj = load_field_from_object(counterMode_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
6369 if (embeddedCipherObj == NULL) return false;
6370 // cast it to what we know it will be at runtime
6371 const TypeInstPtr* tinst = _gvn.type(counterMode_object)->isa_instptr();
6372 assert(tinst != NULL, "CTR obj is null");
6373 assert(tinst->klass()->is_loaded(), "CTR obj is not loaded");
6374 ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
6375 assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
6376 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
6377 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
6378 const TypeOopPtr* xtype = aklass->as_instance_type();
6379 Node* aescrypt_object = new CheckCastPPNode(control(), embeddedCipherObj, xtype);
6380 aescrypt_object = _gvn.transform(aescrypt_object);
6381 // we need to get the start of the aescrypt_object's expanded key array
6382 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
6383 if (k_start == NULL) return false;
6384 // similarly, get the start address of the r vector
6385 Node* obj_counter = load_field_from_object(counterMode_object, "counter", "[B", /*is_exact*/ false);
6386 if (obj_counter == NULL) return false;
6387 obj_counter = access_resolve(obj_counter, ACCESS_WRITE);
6388 Node* cnt_start = array_element_address(obj_counter, intcon(0), T_BYTE);
6389
6390 Node* saved_encCounter = load_field_from_object(counterMode_object, "encryptedCounter", "[B", /*is_exact*/ false);
6391 if (saved_encCounter == NULL) return false;
6392 saved_encCounter = access_resolve(saved_encCounter, ACCESS_WRITE);
6393 Node* saved_encCounter_start = array_element_address(saved_encCounter, intcon(0), T_BYTE);
6394 Node* used = field_address_from_object(counterMode_object, "used", "I", /*is_exact*/ false);
6395
6396 Node* ctrCrypt;
6397 if (Matcher::pass_original_key_for_aes()) {
6398 // no SPARC version for AES/CTR intrinsics now.
6399 return false;
6400 }
6401 // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
6402 ctrCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
6403 OptoRuntime::counterMode_aescrypt_Type(),
6404 stubAddr, stubName, TypePtr::BOTTOM,
6405 src_start, dest_start, k_start, cnt_start, len, saved_encCounter_start, used);
6406
6407 // return cipher length (int)
6408 Node* retvalue = _gvn.transform(new ProjNode(ctrCrypt, TypeFunc::Parms));
6409 set_result(retvalue);
6410 return true;
6411 }
6412
6413 //------------------------------get_key_start_from_aescrypt_object-----------------------
6414 Node * LibraryCallKit::get_key_start_from_aescrypt_object(Node *aescrypt_object) {
6415 #if defined(PPC64) || defined(S390)
6416 // MixColumns for decryption can be reduced by preprocessing MixColumns with round keys.
6417 // Intel's extension is based on this optimization and AESCrypt generates round keys by preprocessing MixColumns.
6418 // However, ppc64 vncipher processes MixColumns and requires the same round keys with encryption.
6419 // The ppc64 stubs of encryption and decryption use the same round keys (sessionK[0]).
6420 Node* objSessionK = load_field_from_object(aescrypt_object, "sessionK", "[[I", /*is_exact*/ false);
6421 assert (objSessionK != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
6422 if (objSessionK == NULL) {
6423 return (Node *) NULL;
6424 }
6425 Node* objAESCryptKey = load_array_element(control(), objSessionK, intcon(0), TypeAryPtr::OOPS);
6426 #else
6427 Node* objAESCryptKey = load_field_from_object(aescrypt_object, "K", "[I", /*is_exact*/ false);
6428 #endif // PPC64 || S390
6429 assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
6430 if (objAESCryptKey == NULL) return (Node *) NULL;
6431
6432 // now have the array, need to get the start address of the K array
6433 objAESCryptKey = access_resolve(objAESCryptKey, ACCESS_READ);
6434 Node* k_start = array_element_address(objAESCryptKey, intcon(0), T_INT);
6435 return k_start;
6436 }
6437
6438 //------------------------------get_original_key_start_from_aescrypt_object-----------------------
6439 Node * LibraryCallKit::get_original_key_start_from_aescrypt_object(Node *aescrypt_object) {
6440 Node* objAESCryptKey = load_field_from_object(aescrypt_object, "lastKey", "[B", /*is_exact*/ false);
6441 assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
6442 if (objAESCryptKey == NULL) return (Node *) NULL;
6443
6444 // now have the array, need to get the start address of the lastKey array
6445 objAESCryptKey = access_resolve(objAESCryptKey, ACCESS_READ);
6446 Node* original_k_start = array_element_address(objAESCryptKey, intcon(0), T_BYTE);
6447 return original_k_start;
6448 }
6449
6450 //----------------------------inline_cipherBlockChaining_AESCrypt_predicate----------------------------
6451 // Return node representing slow path of predicate check.
6452 // the pseudo code we want to emulate with this predicate is:
6453 // for encryption:
6454 // if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
6455 // for decryption:
6456 // if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
6457 // note cipher==plain is more conservative than the original java code but that's OK
6458 //
6459 Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting) {
6460 // The receiver was checked for NULL already.
6461 Node* objCBC = argument(0);
6462
6463 Node* src = argument(1);
6464 Node* dest = argument(4);
6465
6466 // Load embeddedCipher field of CipherBlockChaining object.
6467 Node* embeddedCipherObj = load_field_from_object(objCBC, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
6468
6469 // get AESCrypt klass for instanceOf check
6470 // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
6471 // will have same classloader as CipherBlockChaining object
6472 const TypeInstPtr* tinst = _gvn.type(objCBC)->isa_instptr();
6473 assert(tinst != NULL, "CBCobj is null");
6474 assert(tinst->klass()->is_loaded(), "CBCobj is not loaded");
6475
6476 // we want to do an instanceof comparison against the AESCrypt class
6477 ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
6478 if (!klass_AESCrypt->is_loaded()) {
6479 // if AESCrypt is not even loaded, we never take the intrinsic fast path
6480 Node* ctrl = control();
6481 set_control(top()); // no regular fast path
6482 return ctrl;
6483 }
6484
6485 src = must_be_not_null(src, true);
6486 dest = must_be_not_null(dest, true);
6487
6488 // Resolve oops to stable for CmpP below.
6489 src = access_resolve(src, 0);
6490 dest = access_resolve(dest, 0);
6491
6492 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
6493
6494 Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
6495 Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
6496 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
6497
6498 Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);
6499
6500 // for encryption, we are done
6501 if (!decrypting)
6502 return instof_false; // even if it is NULL
6503
6504 // for decryption, we need to add a further check to avoid
6505 // taking the intrinsic path when cipher and plain are the same
6506 // see the original java code for why.
6507 RegionNode* region = new RegionNode(3);
6508 region->init_req(1, instof_false);
6509
6510 Node* cmp_src_dest = _gvn.transform(new CmpPNode(src, dest));
6511 Node* bool_src_dest = _gvn.transform(new BoolNode(cmp_src_dest, BoolTest::eq));
6512 Node* src_dest_conjoint = generate_guard(bool_src_dest, NULL, PROB_MIN);
6513 region->init_req(2, src_dest_conjoint);
6514
6515 record_for_igvn(region);
6516 return _gvn.transform(region);
6517 }
6518
6519 //----------------------------inline_counterMode_AESCrypt_predicate----------------------------
6520 // Return node representing slow path of predicate check.
6521 // the pseudo code we want to emulate with this predicate is:
6522 // for encryption:
6523 // if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
6524 // for decryption:
6525 // if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
6526 // note cipher==plain is more conservative than the original java code but that's OK
6527 //
6528
6529 Node* LibraryCallKit::inline_counterMode_AESCrypt_predicate() {
6530 // The receiver was checked for NULL already.
6531 Node* objCTR = argument(0);
6532
6533 // Load embeddedCipher field of CipherBlockChaining object.
6534 Node* embeddedCipherObj = load_field_from_object(objCTR, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
6535
6536 // get AESCrypt klass for instanceOf check
6537 // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point
6538 // will have same classloader as CipherBlockChaining object
6539 const TypeInstPtr* tinst = _gvn.type(objCTR)->isa_instptr();
6540 assert(tinst != NULL, "CTRobj is null");
6541 assert(tinst->klass()->is_loaded(), "CTRobj is not loaded");
6542
6543 // we want to do an instanceof comparison against the AESCrypt class
6544 ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
6545 if (!klass_AESCrypt->is_loaded()) {
6546 // if AESCrypt is not even loaded, we never take the intrinsic fast path
6547 Node* ctrl = control();
6548 set_control(top()); // no regular fast path
6549 return ctrl;
6550 }
6551
6552 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
6553 Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
6554 Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1)));
6555 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
6556 Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);
6557
6558 return instof_false; // even if it is NULL
6559 }
6560
6561 //------------------------------inline_ghash_processBlocks
6562 bool LibraryCallKit::inline_ghash_processBlocks() {
6563 address stubAddr;
6564 const char *stubName;
6565 assert(UseGHASHIntrinsics, "need GHASH intrinsics support");
6566
6567 stubAddr = StubRoutines::ghash_processBlocks();
6568 stubName = "ghash_processBlocks";
6569
6570 Node* data = argument(0);
6571 Node* offset = argument(1);
6572 Node* len = argument(2);
6573 Node* state = argument(3);
6574 Node* subkeyH = argument(4);
6575
6576 state = must_be_not_null(state, true);
6577 subkeyH = must_be_not_null(subkeyH, true);
6578 data = must_be_not_null(data, true);
6579
6580 state = access_resolve(state, ACCESS_WRITE);
6581 subkeyH = access_resolve(subkeyH, ACCESS_READ);
6582 data = access_resolve(data, ACCESS_READ);
6583
6584 Node* state_start = array_element_address(state, intcon(0), T_LONG);
6585 assert(state_start, "state is NULL");
6586 Node* subkeyH_start = array_element_address(subkeyH, intcon(0), T_LONG);
6587 assert(subkeyH_start, "subkeyH is NULL");
6588 Node* data_start = array_element_address(data, offset, T_BYTE);
6589 assert(data_start, "data is NULL");
6590
6591 Node* ghash = make_runtime_call(RC_LEAF|RC_NO_FP,
6592 OptoRuntime::ghash_processBlocks_Type(),
6593 stubAddr, stubName, TypePtr::BOTTOM,
6594 state_start, subkeyH_start, data_start, len);
6595 return true;
6596 }
6597
6598 bool LibraryCallKit::inline_base64_encodeBlock() {
6599 address stubAddr;
6600 const char *stubName;
6601 assert(UseBASE64Intrinsics, "need Base64 intrinsics support");
6602 assert(callee()->signature()->size() == 6, "base64_encodeBlock has 6 parameters");
6603 stubAddr = StubRoutines::base64_encodeBlock();
6604 stubName = "encodeBlock";
6605
6606 if (stubAddr == NULL) return false;
6607 Node* base64obj = argument(0);
6608 Node* src = argument(1);
6609 Node* offset = argument(2);
6610 Node* len = argument(3);
6611 Node* dest = argument(4);
6612 Node* dp = argument(5);
6613 Node* isURL = argument(6);
6614
6615 src = must_be_not_null(src, true);
6616 src = access_resolve(src, ACCESS_READ);
6617 dest = must_be_not_null(dest, true);
6618 dest = access_resolve(dest, ACCESS_WRITE);
6619
6620 Node* src_start = array_element_address(src, intcon(0), T_BYTE);
6621 assert(src_start, "source array is NULL");
6622 Node* dest_start = array_element_address(dest, intcon(0), T_BYTE);
6623 assert(dest_start, "destination array is NULL");
6624
6625 Node* base64 = make_runtime_call(RC_LEAF,
6626 OptoRuntime::base64_encodeBlock_Type(),
6627 stubAddr, stubName, TypePtr::BOTTOM,
6628 src_start, offset, len, dest_start, dp, isURL);
6629 return true;
6630 }
6631
6632 //------------------------------inline_sha_implCompress-----------------------
6633 //
6634 // Calculate SHA (i.e., SHA-1) for single-block byte[] array.
6635 // void com.sun.security.provider.SHA.implCompress(byte[] buf, int ofs)
6636 //
6637 // Calculate SHA2 (i.e., SHA-224 or SHA-256) for single-block byte[] array.
6638 // void com.sun.security.provider.SHA2.implCompress(byte[] buf, int ofs)
6639 //
6640 // Calculate SHA5 (i.e., SHA-384 or SHA-512) for single-block byte[] array.
6641 // void com.sun.security.provider.SHA5.implCompress(byte[] buf, int ofs)
6642 //
6643 bool LibraryCallKit::inline_sha_implCompress(vmIntrinsics::ID id) {
6644 assert(callee()->signature()->size() == 2, "sha_implCompress has 2 parameters");
6645
6646 Node* sha_obj = argument(0);
6647 Node* src = argument(1); // type oop
6648 Node* ofs = argument(2); // type int
6649
6650 const Type* src_type = src->Value(&_gvn);
6651 const TypeAryPtr* top_src = src_type->isa_aryptr();
6652 if (top_src == NULL || top_src->klass() == NULL) {
6653 // failed array check
6654 return false;
6655 }
6656 // Figure out the size and type of the elements we will be copying.
6657 BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
6658 if (src_elem != T_BYTE) {
6659 return false;
6660 }
6661 // 'src_start' points to src array + offset
6662 src = must_be_not_null(src, true);
6663 src = access_resolve(src, ACCESS_READ);
6664 Node* src_start = array_element_address(src, ofs, src_elem);
6665 Node* state = NULL;
6666 address stubAddr;
6667 const char *stubName;
6668
6669 switch(id) {
6670 case vmIntrinsics::_sha_implCompress:
6671 assert(UseSHA1Intrinsics, "need SHA1 instruction support");
6672 state = get_state_from_sha_object(sha_obj);
6673 stubAddr = StubRoutines::sha1_implCompress();
6674 stubName = "sha1_implCompress";
6675 break;
6676 case vmIntrinsics::_sha2_implCompress:
6677 assert(UseSHA256Intrinsics, "need SHA256 instruction support");
6678 state = get_state_from_sha_object(sha_obj);
6679 stubAddr = StubRoutines::sha256_implCompress();
6680 stubName = "sha256_implCompress";
6681 break;
6682 case vmIntrinsics::_sha5_implCompress:
6683 assert(UseSHA512Intrinsics, "need SHA512 instruction support");
6684 state = get_state_from_sha5_object(sha_obj);
6685 stubAddr = StubRoutines::sha512_implCompress();
6686 stubName = "sha512_implCompress";
6687 break;
6688 default:
6689 fatal_unexpected_iid(id);
6690 return false;
6691 }
6692 if (state == NULL) return false;
6693
6694 // Call the stub.
6695 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::sha_implCompress_Type(),
6696 stubAddr, stubName, TypePtr::BOTTOM,
6697 src_start, state);
6698
6699 return true;
6700 }
6701
6702 //------------------------------inline_digestBase_implCompressMB-----------------------
6703 //
6704 // Calculate SHA/SHA2/SHA5 for multi-block byte[] array.
6705 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
6706 //
6707 bool LibraryCallKit::inline_digestBase_implCompressMB(int predicate) {
6708 assert(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics,
6709 "need SHA1/SHA256/SHA512 instruction support");
6710 assert((uint)predicate < 3, "sanity");
6711 assert(callee()->signature()->size() == 3, "digestBase_implCompressMB has 3 parameters");
6712
6713 Node* digestBase_obj = argument(0); // The receiver was checked for NULL already.
6714 Node* src = argument(1); // byte[] array
6715 Node* ofs = argument(2); // type int
6716 Node* limit = argument(3); // type int
6717
6718 const Type* src_type = src->Value(&_gvn);
6719 const TypeAryPtr* top_src = src_type->isa_aryptr();
6720 if (top_src == NULL || top_src->klass() == NULL) {
6721 // failed array check
6722 return false;
6723 }
6724 // Figure out the size and type of the elements we will be copying.
6725 BasicType src_elem = top_src->klass()->as_array_klass()->element_type()->basic_type();
6726 if (src_elem != T_BYTE) {
6727 return false;
6728 }
6729 // 'src_start' points to src array + offset
6730 src = must_be_not_null(src, false);
6731 src = access_resolve(src, ACCESS_READ);
6732 Node* src_start = array_element_address(src, ofs, src_elem);
6733
6734 const char* klass_SHA_name = NULL;
6735 const char* stub_name = NULL;
6736 address stub_addr = NULL;
6737 bool long_state = false;
6738
6739 switch (predicate) {
6740 case 0:
6741 if (UseSHA1Intrinsics) {
6742 klass_SHA_name = "sun/security/provider/SHA";
6743 stub_name = "sha1_implCompressMB";
6744 stub_addr = StubRoutines::sha1_implCompressMB();
6745 }
6746 break;
6747 case 1:
6748 if (UseSHA256Intrinsics) {
6749 klass_SHA_name = "sun/security/provider/SHA2";
6750 stub_name = "sha256_implCompressMB";
6751 stub_addr = StubRoutines::sha256_implCompressMB();
6752 }
6753 break;
6754 case 2:
6755 if (UseSHA512Intrinsics) {
6756 klass_SHA_name = "sun/security/provider/SHA5";
6757 stub_name = "sha512_implCompressMB";
6758 stub_addr = StubRoutines::sha512_implCompressMB();
6759 long_state = true;
6760 }
6761 break;
6762 default:
6763 fatal("unknown SHA intrinsic predicate: %d", predicate);
6764 }
6765 if (klass_SHA_name != NULL) {
6766 // Get the DigestBase klass to look up the SHA klass.
6767 const TypeInstPtr* tinst = _gvn.type(digestBase_obj)->isa_instptr();
6768 assert(tinst != NULL, "digestBase_obj is not instance???");
6769 assert(tinst->klass()->is_loaded(), "DigestBase is not loaded");
6770
6771 ciKlass* klass_SHA = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make(klass_SHA_name));
6772 assert(klass_SHA->is_loaded(), "predicate checks that this class is loaded");
6773 ciInstanceKlass* instklass_SHA = klass_SHA->as_instance_klass();
6774 return inline_sha_implCompressMB(digestBase_obj, instklass_SHA, long_state, stub_addr, stub_name, src_start, ofs, limit);
6775 }
6776 return false;
6777 }
6778 //------------------------------inline_sha_implCompressMB-----------------------
6779 bool LibraryCallKit::inline_sha_implCompressMB(Node* digestBase_obj, ciInstanceKlass* instklass_SHA,
6780 bool long_state, address stubAddr, const char *stubName,
6781 Node* src_start, Node* ofs, Node* limit) {
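// Cast the receiver down to the concrete SHA klass so that the 'state' field lookup below resolves against the right holder class.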
6782 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_SHA);
6783 const TypeOopPtr* xtype = aklass->as_instance_type();
6784 Node* sha_obj = new CheckCastPPNode(control(), digestBase_obj, xtype);
6785 sha_obj = _gvn.transform(sha_obj);
6786
6787 Node* state;
6788 if (long_state) {
6789 state = get_state_from_sha5_object(sha_obj);
6790 } else {
6791 state = get_state_from_sha_object(sha_obj);
6792 }
6793 if (state == NULL) return false;
6794
6795 // Call the stub.
6796 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
6797 OptoRuntime::digestBase_implCompressMB_Type(),
6798 stubAddr, stubName, TypePtr::BOTTOM,
6799 src_start, state, ofs, limit);
6800 // return ofs (int)
6801 Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
6802 set_result(result);
6803
6804 return true;
6805 }
6806
6807 //------------------------------get_state_from_sha_object-----------------------
6808 Node * LibraryCallKit::get_state_from_sha_object(Node *sha_object) {
6809 Node* sha_state = load_field_from_object(sha_object, "state", "[I", /*is_exact*/ false);
6810 assert (sha_state != NULL, "wrong version of sun.security.provider.SHA/SHA2");
6811 if (sha_state == NULL) return (Node *) NULL;
6812
6813 // now have the array, need to get the start address of the state array
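// The stub updates the digest state in place, hence the array is resolved for write access.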
6814 sha_state = access_resolve(sha_state, ACCESS_WRITE);
6815 Node* state = array_element_address(sha_state, intcon(0), T_INT);
6816 return state;
6817 }
6818
6819 //------------------------------get_state_from_sha5_object-----------------------
6820 Node * LibraryCallKit::get_state_from_sha5_object(Node *sha_object) {
6821 Node* sha_state = load_field_from_object(sha_object, "state", "[J", /*is_exact*/ false);
6822 assert (sha_state != NULL, "wrong version of sun.security.provider.SHA5");
6823 if (sha_state == NULL) return (Node *) NULL;
6824
6825 // now have the array, need to get the start address of the state array
6826 sha_state = access_resolve(sha_state, ACCESS_WRITE);
6827 Node* state = array_element_address(sha_state, intcon(0), T_LONG);
6828 return state;
6829 }
6830
6831 //----------------------------inline_digestBase_implCompressMB_predicate----------------------------
6832 // Return node representing slow path of predicate check.
6833 // The pseudo-code we want to emulate with this predicate is:
6834 // if (digestBaseObj instanceof SHA/SHA2/SHA5) do_intrinsic, else do_javapath
6835 //
6836 Node* LibraryCallKit::inline_digestBase_implCompressMB_predicate(int predicate) {
6837 assert(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics,
6838 "need SHA1/SHA256/SHA512 instruction support");
6839 assert((uint)predicate < 3, "sanity");
6840
6841 // The receiver was checked for NULL already.
6842 Node* digestBaseObj = argument(0);
6843
6844 // get DigestBase klass for instanceOf check
6845 const TypeInstPtr* tinst = _gvn.type(digestBaseObj)->isa_instptr();
6846 assert(tinst != NULL, "digestBaseObj is not an instance pointer");
6847 assert(tinst->klass()->is_loaded(), "DigestBase is not loaded");
6848
6849 const char* klass_SHA_name = NULL;
6850 switch (predicate) {
6851 case 0:
6852 if (UseSHA1Intrinsics) {
6853 // we want to do an instanceof comparison against the SHA class
6854 klass_SHA_name = "sun/security/provider/SHA";
6855 }
6856 break;
6857 case 1:
6858 if (UseSHA256Intrinsics) {
6859 // we want to do an instanceof comparison against the SHA2 class
6860 klass_SHA_name = "sun/security/provider/SHA2";
6861 }
6862 break;
6863 case 2:
6864 if (UseSHA512Intrinsics) {
6865 // we want to do an instanceof comparison against the SHA5 class
6866 klass_SHA_name = "sun/security/provider/SHA5";
6867 }
6868 break;
6869 default:
6870 fatal("unknown SHA intrinsic predicate: %d", predicate);
6871 }
6872
6873 ciKlass* klass_SHA = NULL;
6874 if (klass_SHA_name != NULL) {
6875 klass_SHA = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make(klass_SHA_name));
6876 }
6877 if ((klass_SHA == NULL) || !klass_SHA->is_loaded()) {
6878 // if none of SHA/SHA2/SHA5 is loaded, we never take the intrinsic fast path
6879 Node* ctrl = control();
6880 set_control(top()); // no intrinsic path
6881 return ctrl;
6882 }
6883 ciInstanceKlass* instklass_SHA = klass_SHA->as_instance_klass();
6884
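// Emit 'digestBaseObj instanceof klass_SHA'; generate_guard() returns the control projection for the not-an-instance outcome, which becomes the slow (Java) path.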
6885 Node* instofSHA = gen_instanceof(digestBaseObj, makecon(TypeKlassPtr::make(instklass_SHA)));
6886 Node* cmp_instof = _gvn.transform(new CmpINode(instofSHA, intcon(1)));
6887 Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne));
6888 Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);
6889
6890 return instof_false; // even if it is NULL
6891 }
6892
6893 //-------------inline_fma-----------------------------------
6894 bool LibraryCallKit::inline_fma(vmIntrinsics::ID id) {
6895 Node *a = NULL;
6896 Node *b = NULL;
6897 Node *c = NULL;
6898 Node* result = NULL;
6899 switch (id) {
6900 case vmIntrinsics::_fmaD:
6901 assert(callee()->signature()->size() == 6, "fma has 3 parameters of size 2 each.");
6902 // no receiver since it is a static method
6903 a = round_double_node(argument(0));
6904 b = round_double_node(argument(2));
6905 c = round_double_node(argument(4));
6906 result = _gvn.transform(new FmaDNode(control(), a, b, c));
6907 break;
6908 case vmIntrinsics::_fmaF:
6909 assert(callee()->signature()->size() == 3, "fma has 3 parameters of size 1 each.");
6910 a = argument(0);
6911 b = argument(1);
6912 c = argument(2);
6913 result = _gvn.transform(new FmaFNode(control(), a, b, c));
6914 break;
6915 default:
6916 fatal_unexpected_iid(id); break;
6917 }
6918 set_result(result);
6919 return true;
6920 }
6921
6922 bool LibraryCallKit::inline_character_compare(vmIntrinsics::ID id) {
6923 // argument(0) is receiver
6924 Node* codePoint = argument(1);
6925 Node* n = NULL;
6926
6927 switch (id) {
6928 case vmIntrinsics::_isDigit :
6929 n = new DigitNode(control(), codePoint);
6930 break;
6931 case vmIntrinsics::_isLowerCase :
6932 n = new LowerCaseNode(control(), codePoint);
6933 break;
6934 case vmIntrinsics::_isUpperCase :
6935 n = new UpperCaseNode(control(), codePoint);
6936 break;
6937 case vmIntrinsics::_isWhitespace :
6938 n = new WhitespaceNode(control(), codePoint);
6939 break;
6940 default:
6941 fatal_unexpected_iid(id);
6942 }
6943
6944 set_result(_gvn.transform(n));
6945 return true;
6946 }
6947
6948 //------------------------------inline_fp_min_max------------------------------
6949 bool LibraryCallKit::inline_fp_min_max(vmIntrinsics::ID id) {
6950 /* DISABLED BECAUSE METHOD DATA ISN'T COLLECTED PER CALL-SITE, SEE JDK-8015416.
6951
6952 // The intrinsic should be used only when the API branches aren't predictable,
6953 // with the last branch performing the most important comparison. The following
6954 // heuristic uses the branch statistics to eventually bail out if necessary.
6955
6956 ciMethodData *md = callee()->method_data();
6957
6958 if ( md != NULL && md->is_mature() && md->invocation_count() > 0 ) {
6959 ciCallProfile cp = caller()->call_profile_at_bci(bci());
6960
6961 if ( ((double)cp.count()) / ((double)md->invocation_count()) < 0.8 ) {
6962 // Bail out if the call-site didn't contribute enough to the statistics.
6963 return false;
6964 }
6965
6966 uint taken = 0, not_taken = 0;
6967
6968 for (ciProfileData *p = md->first_data(); md->is_valid(p); p = md->next_data(p)) {
6969 if (p->is_BranchData()) {
6970 taken = ((ciBranchData*)p)->taken();
6971 not_taken = ((ciBranchData*)p)->not_taken();
6972 }
6973 }
6974
6975 double balance = (((double)taken) - ((double)not_taken)) / ((double)md->invocation_count());
6976 balance = balance < 0 ? -balance : balance;
6977 if ( balance > 0.2 ) {
6978 // Bail out if the most important branch is predictable enough.
6979 return false;
6980 }
6981 }
6982 */
6983
6984 Node *a = NULL;
6985 Node *b = NULL;
6986 Node *n = NULL;
6987 switch (id) {
6988 case vmIntrinsics::_maxF:
6989 case vmIntrinsics::_minF:
6990 assert(callee()->signature()->size() == 2, "minF/maxF has 2 parameters of size 1 each.");
6991 a = argument(0);
6992 b = argument(1);
6993 break;
6994 case vmIntrinsics::_maxD:
6995 case vmIntrinsics::_minD:
6996 assert(callee()->signature()->size() == 4, "minD/maxD has 2 parameters of size 2 each.");
6997 a = round_double_node(argument(0));
6998 b = round_double_node(argument(2));
6999 break;
7000 default:
7001 fatal_unexpected_iid(id);
7002 break;
7003 }
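// Bail out if either operand is a constant; such calls are left to the regular (non-intrinsic) path.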
7004 if (a->is_Con() || b->is_Con()) {
7005 return false;
7006 }
7007 switch (id) {
7008 case vmIntrinsics::_maxF: n = new MaxFNode(a, b); break;
7009 case vmIntrinsics::_minF: n = new MinFNode(a, b); break;
7010 case vmIntrinsics::_maxD: n = new MaxDNode(a, b); break;
7011 case vmIntrinsics::_minD: n = new MinDNode(a, b); break;
7012 default: fatal_unexpected_iid(id); break;
7013 }
7014 set_result(_gvn.transform(n));
7015 return true;
7016 }
7017
7018 bool LibraryCallKit::inline_profileBoolean() {
7019 Node* counts = argument(1);
7020 const TypeAryPtr* ary = NULL;
7021 ciArray* aobj = NULL;
7022 if (counts->is_Con()
7023 && (ary = counts->bottom_type()->isa_aryptr()) != NULL
7024 && (aobj = ary->const_oop()->as_array()) != NULL
7025 && (aobj->length() == 2)) {
7026 // Profile is int[2] where [0] and [1] correspond to false and true value occurrences respectively.
7027 jint false_cnt = aobj->element_value(0).as_int();
7028 jint true_cnt = aobj->element_value(1).as_int();
7029
7030 if (C->log() != NULL) {
7031 C->log()->elem("observe source='profileBoolean' false='%d' true='%d'",
7032 false_cnt, true_cnt);
7033 }
7034
7035 if (false_cnt + true_cnt == 0) {
7036 // According to the profile, never executed.
7037 uncommon_trap_exact(Deoptimization::Reason_intrinsic,
7038 Deoptimization::Action_reinterpret);
7039 return true;
7040 }
7041
7042 // The result is a boolean (0 or 1) and its profile (false_cnt & true_cnt)
7043 // records the number of occurrences of each value.
7044 Node* result = argument(0);
7045 if (false_cnt == 0 || true_cnt == 0) {
7046 // According to the profile, one value has never been seen.
7047 int expected_val = (false_cnt == 0) ? 1 : 0;
7048
7049 Node* cmp = _gvn.transform(new CmpINode(result, intcon(expected_val)));
7050 Node* test = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
7051
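// Guard: if the observed value equals the only value ever seen, take the fast path; otherwise deoptimize via the slow path below.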
7052 IfNode* check = create_and_map_if(control(), test, PROB_ALWAYS, COUNT_UNKNOWN);
7053 Node* fast_path = _gvn.transform(new IfTrueNode(check));
7054 Node* slow_path = _gvn.transform(new IfFalseNode(check));
7055
7056 { // Slow path: uncommon trap for the never-seen value, then reexecute
7057 // MethodHandleImpl::profileBoolean() to bump the count, so the JIT knows
7058 // the value has been seen at least once.
7059 PreserveJVMState pjvms(this);
7060 PreserveReexecuteState preexecs(this);
7061 jvms()->set_should_reexecute(true);
7062
7063 set_control(slow_path);
7064 set_i_o(i_o());
7065
7066 uncommon_trap_exact(Deoptimization::Reason_intrinsic,
7067 Deoptimization::Action_reinterpret);
7068 }
7069 // The guard for the never-seen value enables sharpening of the result and
7070 // returning a constant. It allows branches on the same value to be
7071 // eliminated later on.
7072 set_control(fast_path);
7073 result = intcon(expected_val);
7074 }
7075 // Stop profiling.
7076 // MethodHandleImpl::profileBoolean() has profiling logic in its bytecode.
7077 // By replacing the method body with profile data (represented as a ProfileBooleanNode
7078 // at the IR level) we effectively disable profiling.
7079 // This enables full-speed execution once optimized code is generated.
7080 Node* profile = _gvn.transform(new ProfileBooleanNode(result, false_cnt, true_cnt));
7081 C->record_for_igvn(profile);
7082 set_result(profile);
7083 return true;
7084 } else {
7085 // Continue profiling.
7086 // Profile data isn't available at the moment, so execute the method's bytecode version.
7087 // Usually, when GWT LambdaForms are profiled, it means that a stand-alone nmethod
7088 // is compiled and counters aren't available, since the corresponding MethodHandle
7089 // isn't a compile-time constant.
7090 return false;
7091 }
7092 }
7093
7094 bool LibraryCallKit::inline_isCompileConstant() {
7095 Node* n = argument(0);
7096 set_result(n->is_Con() ? intcon(1) : intcon(0));
7097 return true;
7098 }
--- EOF ---