--- old/src/share/vm/opto/library_call.cpp
+++ new/src/share/vm/opto/library_call.cpp
1 1 /*
2 2 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "classfile/systemDictionary.hpp"
27 27 #include "classfile/vmSymbols.hpp"
28 28 #include "compiler/compileBroker.hpp"
29 29 #include "compiler/compileLog.hpp"
30 30 #include "oops/objArrayKlass.hpp"
31 31 #include "opto/addnode.hpp"
32 32 #include "opto/callGenerator.hpp"
33 33 #include "opto/cfgnode.hpp"
34 34 #include "opto/idealKit.hpp"
35 35 #include "opto/mathexactnode.hpp"
36 36 #include "opto/mulnode.hpp"
37 37 #include "opto/parse.hpp"
38 38 #include "opto/runtime.hpp"
39 39 #include "opto/subnode.hpp"
40 40 #include "prims/nativeLookup.hpp"
41 41 #include "runtime/sharedRuntime.hpp"
42 42 #include "trace/traceMacros.hpp"
43 43
44 44 class LibraryIntrinsic : public InlineCallGenerator {
45 45 // Extend the set of intrinsics known to the runtime:
46 46 public:
47 47 private:
48 48 bool _is_virtual;
49 49 bool _is_predicted;
50 50 bool _does_virtual_dispatch;
51 51 vmIntrinsics::ID _intrinsic_id;
52 52
53 53 public:
54 54 LibraryIntrinsic(ciMethod* m, bool is_virtual, bool is_predicted, bool does_virtual_dispatch, vmIntrinsics::ID id)
55 55 : InlineCallGenerator(m),
56 56 _is_virtual(is_virtual),
57 57 _is_predicted(is_predicted),
58 58 _does_virtual_dispatch(does_virtual_dispatch),
59 59 _intrinsic_id(id)
60 60 {
61 61 }
62 62 virtual bool is_intrinsic() const { return true; }
63 63 virtual bool is_virtual() const { return _is_virtual; }
64 64 virtual bool is_predicted() const { return _is_predicted; }
65 65 virtual bool does_virtual_dispatch() const { return _does_virtual_dispatch; }
66 66 virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
67 67 virtual Node* generate_predicate(JVMState* jvms);
68 68 vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; }
69 69 };
70 70
71 71
72 72 // Local helper class for LibraryIntrinsic:
73 73 class LibraryCallKit : public GraphKit {
74 74 private:
75 75 LibraryIntrinsic* _intrinsic; // the library intrinsic being called
76 76 Node* _result; // the result node, if any
77 77 int _reexecute_sp; // the stack pointer when bytecode needs to be reexecuted
78 78
79 79 const TypeOopPtr* sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type, bool is_native_ptr = false);
80 80
81 81 public:
82 82 LibraryCallKit(JVMState* jvms, LibraryIntrinsic* intrinsic)
83 83 : GraphKit(jvms),
84 84 _intrinsic(intrinsic),
85 85 _result(NULL)
86 86 {
87 87 // Check if this is a root compile. In that case we don't have a caller.
88 88 if (!jvms->has_method()) {
89 89 _reexecute_sp = sp();
90 90 } else {
91 91 // Find out how many arguments the interpreter needs when deoptimizing
92 92 // and save the stack pointer value so it can be used by uncommon_trap.
93 93 // We find the argument count by looking at the declared signature.
94 94 bool ignored_will_link;
95 95 ciSignature* declared_signature = NULL;
96 96 ciMethod* ignored_callee = caller()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
97 97 const int nargs = declared_signature->arg_size_for_bc(caller()->java_code_at_bci(bci()));
98 98 _reexecute_sp = sp() + nargs; // "push" arguments back on stack
99 99 }
100 100 }
101 101
102 102 virtual LibraryCallKit* is_LibraryCallKit() const { return (LibraryCallKit*)this; }
103 103
104 104 ciMethod* caller() const { return jvms()->method(); }
105 105 int bci() const { return jvms()->bci(); }
106 106 LibraryIntrinsic* intrinsic() const { return _intrinsic; }
107 107 vmIntrinsics::ID intrinsic_id() const { return _intrinsic->intrinsic_id(); }
108 108 ciMethod* callee() const { return _intrinsic->method(); }
109 109
110 110 bool try_to_inline();
111 111 Node* try_to_predicate();
112 112
113 113 void push_result() {
114 114 // Push the result onto the stack.
115 115 if (!stopped() && result() != NULL) {
116 116 BasicType bt = result()->bottom_type()->basic_type();
117 117 push_node(bt, result());
118 118 }
119 119 }
120 120
121 121 private:
122 122 void fatal_unexpected_iid(vmIntrinsics::ID iid) {
123 123 fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
124 124 }
125 125
126 126 void set_result(Node* n) { assert(_result == NULL, "only set once"); _result = n; }
127 127 void set_result(RegionNode* region, PhiNode* value);
128 128 Node* result() { return _result; }
129 129
130 130 virtual int reexecute_sp() { return _reexecute_sp; }
131 131
132 132 // Helper functions to inline natives
133 133 Node* generate_guard(Node* test, RegionNode* region, float true_prob);
134 134 Node* generate_slow_guard(Node* test, RegionNode* region);
135 135 Node* generate_fair_guard(Node* test, RegionNode* region);
136 136 Node* generate_negative_guard(Node* index, RegionNode* region,
137 137 // resulting CastII of index:
138 138 Node* *pos_index = NULL);
139 139 Node* generate_nonpositive_guard(Node* index, bool never_negative,
140 140 // resulting CastII of index:
141 141 Node* *pos_index = NULL);
142 142 Node* generate_limit_guard(Node* offset, Node* subseq_length,
143 143 Node* array_length,
144 144 RegionNode* region);
145 145 Node* generate_current_thread(Node* &tls_output);
146 146 address basictype2arraycopy(BasicType t, Node *src_offset, Node *dest_offset,
147 147 bool disjoint_bases, const char* &name, bool dest_uninitialized);
148 148 Node* load_mirror_from_klass(Node* klass);
149 149 Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null,
150 150 RegionNode* region, int null_path,
151 151 int offset);
152 152 Node* load_klass_from_mirror(Node* mirror, bool never_see_null,
153 153 RegionNode* region, int null_path) {
154 154 int offset = java_lang_Class::klass_offset_in_bytes();
155 155 return load_klass_from_mirror_common(mirror, never_see_null,
156 156 region, null_path,
157 157 offset);
158 158 }
159 159 Node* load_array_klass_from_mirror(Node* mirror, bool never_see_null,
160 160 RegionNode* region, int null_path) {
161 161 int offset = java_lang_Class::array_klass_offset_in_bytes();
162 162 return load_klass_from_mirror_common(mirror, never_see_null,
163 163 region, null_path,
164 164 offset);
165 165 }
166 166 Node* generate_access_flags_guard(Node* kls,
167 167 int modifier_mask, int modifier_bits,
168 168 RegionNode* region);
169 169 Node* generate_interface_guard(Node* kls, RegionNode* region);
170 170 Node* generate_array_guard(Node* kls, RegionNode* region) {
171 171 return generate_array_guard_common(kls, region, false, false);
172 172 }
173 173 Node* generate_non_array_guard(Node* kls, RegionNode* region) {
174 174 return generate_array_guard_common(kls, region, false, true);
175 175 }
176 176 Node* generate_objArray_guard(Node* kls, RegionNode* region) {
177 177 return generate_array_guard_common(kls, region, true, false);
178 178 }
179 179 Node* generate_non_objArray_guard(Node* kls, RegionNode* region) {
180 180 return generate_array_guard_common(kls, region, true, true);
181 181 }
182 182 Node* generate_array_guard_common(Node* kls, RegionNode* region,
183 183 bool obj_array, bool not_array);
184 184 Node* generate_virtual_guard(Node* obj_klass, RegionNode* slow_region);
185 185 CallJavaNode* generate_method_call(vmIntrinsics::ID method_id,
186 186 bool is_virtual = false, bool is_static = false);
187 187 CallJavaNode* generate_method_call_static(vmIntrinsics::ID method_id) {
188 188 return generate_method_call(method_id, false, true);
189 189 }
190 190 CallJavaNode* generate_method_call_virtual(vmIntrinsics::ID method_id) {
191 191 return generate_method_call(method_id, true, false);
192 192 }
193 193 Node* load_field_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString, bool is_exact, bool is_static);
194 194
195 195 Node* make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2);
196 196 Node* make_string_method_node(int opcode, Node* str1, Node* str2);
197 197 bool inline_string_compareTo();
198 198 bool inline_string_indexOf();
199 199 Node* string_indexOf(Node* string_object, ciTypeArray* target_array, jint offset, jint cache_i, jint md2_i);
200 200 bool inline_string_equals();
201 201 Node* round_double_node(Node* n);
202 202 bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
203 203 bool inline_math_native(vmIntrinsics::ID id);
204 204 bool inline_trig(vmIntrinsics::ID id);
205 205 bool inline_math(vmIntrinsics::ID id);
206 206 template <typename OverflowOp>
207 207 bool inline_math_overflow(Node* arg1, Node* arg2);
208 208 void inline_math_mathExact(Node* math, Node* test);
209 209 bool inline_math_addExactI(bool is_increment);
210 210 bool inline_math_addExactL(bool is_increment);
211 211 bool inline_math_multiplyExactI();
212 212 bool inline_math_multiplyExactL();
213 213 bool inline_math_negateExactI();
214 214 bool inline_math_negateExactL();
215 215 bool inline_math_subtractExactI(bool is_decrement);
216 216 bool inline_math_subtractExactL(bool is_decrement);
217 217 bool inline_exp();
218 218 bool inline_pow();
219 219 void finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName);
220 220 bool inline_min_max(vmIntrinsics::ID id);
221 221 Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
222 222 // This returns Type::AnyPtr, RawPtr, or OopPtr.
223 223 int classify_unsafe_addr(Node* &base, Node* &offset);
224 224 Node* make_unsafe_address(Node* base, Node* offset);
225 225 // Helper for inline_unsafe_access.
226 226 // Generates the guards that check whether the result of
227 227 // Unsafe.getObject should be recorded in an SATB log buffer.
228 228 void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
229 229 bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile);
230 230 bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static);
231 231 static bool klass_needs_init_guard(Node* kls);
232 232 bool inline_unsafe_allocate();
233 233 bool inline_unsafe_copyMemory();
234 234 bool inline_native_currentThread();
235 235 #ifdef TRACE_HAVE_INTRINSICS
236 236 bool inline_native_classID();
237 237 bool inline_native_threadID();
238 238 #endif
239 239 bool inline_native_time_funcs(address method, const char* funcName);
240 240 bool inline_native_isInterrupted();
241 241 bool inline_native_Class_query(vmIntrinsics::ID id);
242 242 bool inline_native_subtype_check();
243 243
244 244 bool inline_native_newArray();
245 245 bool inline_native_getLength();
246 246 bool inline_array_copyOf(bool is_copyOfRange);
247 247 bool inline_array_equals();
248 248 void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
249 249 bool inline_native_clone(bool is_virtual);
250 250 bool inline_native_Reflection_getCallerClass();
251 251 // Helper function for inlining native object hash method
252 252 bool inline_native_hashcode(bool is_virtual, bool is_static);
253 253 bool inline_native_getClass();
254 254
255 255 // Helper functions for inlining arraycopy
256 256 bool inline_arraycopy();
257 257 void generate_arraycopy(const TypePtr* adr_type,
258 258 BasicType basic_elem_type,
259 259 Node* src, Node* src_offset,
260 260 Node* dest, Node* dest_offset,
261 261 Node* copy_length,
262 262 bool disjoint_bases = false,
263 263 bool length_never_negative = false,
264 264 RegionNode* slow_region = NULL);
265 265 AllocateArrayNode* tightly_coupled_allocation(Node* ptr,
266 266 RegionNode* slow_region);
267 267 void generate_clear_array(const TypePtr* adr_type,
268 268 Node* dest,
269 269 BasicType basic_elem_type,
270 270 Node* slice_off,
271 271 Node* slice_len,
272 272 Node* slice_end);
273 273 bool generate_block_arraycopy(const TypePtr* adr_type,
274 274 BasicType basic_elem_type,
275 275 AllocateNode* alloc,
276 276 Node* src, Node* src_offset,
277 277 Node* dest, Node* dest_offset,
278 278 Node* dest_size, bool dest_uninitialized);
279 279 void generate_slow_arraycopy(const TypePtr* adr_type,
280 280 Node* src, Node* src_offset,
281 281 Node* dest, Node* dest_offset,
282 282 Node* copy_length, bool dest_uninitialized);
283 283 Node* generate_checkcast_arraycopy(const TypePtr* adr_type,
284 284 Node* dest_elem_klass,
285 285 Node* src, Node* src_offset,
286 286 Node* dest, Node* dest_offset,
287 287 Node* copy_length, bool dest_uninitialized);
288 288 Node* generate_generic_arraycopy(const TypePtr* adr_type,
289 289 Node* src, Node* src_offset,
290 290 Node* dest, Node* dest_offset,
291 291 Node* copy_length, bool dest_uninitialized);
292 292 void generate_unchecked_arraycopy(const TypePtr* adr_type,
293 293 BasicType basic_elem_type,
294 294 bool disjoint_bases,
295 295 Node* src, Node* src_offset,
296 296 Node* dest, Node* dest_offset,
297 297 Node* copy_length, bool dest_uninitialized);
298 298 typedef enum { LS_xadd, LS_xchg, LS_cmpxchg } LoadStoreKind;
299 299 bool inline_unsafe_load_store(BasicType type, LoadStoreKind kind);
300 300 bool inline_unsafe_ordered_store(BasicType type);
301 301 bool inline_unsafe_fence(vmIntrinsics::ID id);
302 302 bool inline_fp_conversions(vmIntrinsics::ID id);
303 303 bool inline_number_methods(vmIntrinsics::ID id);
304 304 bool inline_reference_get();
305 305 bool inline_aescrypt_Block(vmIntrinsics::ID id);
306 306 bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id);
307 307 Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting);
308 308 Node* get_key_start_from_aescrypt_object(Node* aescrypt_object);
309 309 Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
310 310 bool inline_encodeISOArray();
311 311 bool inline_updateCRC32();
312 312 bool inline_updateBytesCRC32();
313 313 bool inline_updateByteBufferCRC32();
314 314 };
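// A quick orientation to the machinery in this file (a sketch of the flow,
// not new behavior): Compile::make_vm_intrinsic() below vets each candidate
// method against command-line flags and Matcher support and, on success,
// wraps it in a LibraryIntrinsic.  During parsing, LibraryIntrinsic::generate()
// builds a LibraryCallKit and calls try_to_inline(), which dispatches on the
// intrinsic ID to one of the inline_* helpers declared above; predicated
// intrinsics additionally route through generate_predicate()/try_to_predicate().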
315 315
316 316
317 317 //---------------------------make_vm_intrinsic----------------------------
318 318 CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
319 319 vmIntrinsics::ID id = m->intrinsic_id();
320 320 assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
321 321
322 322 if (DisableIntrinsic[0] != '\0'
323 323 && strstr(DisableIntrinsic, vmIntrinsics::name_at(id)) != NULL) {
324 324 // disabled by a user request on the command line:
325 325 // example: -XX:DisableIntrinsic=_hashCode,_getClass
326 326 return NULL;
327 327 }
328 328
329 329 if (!m->is_loaded()) {
330 330 // do not attempt to inline unloaded methods
331 331 return NULL;
332 332 }
333 333
334 334 // Only a few intrinsics implement a virtual dispatch.
335 335 // They are expensive calls which are also frequently overridden.
336 336 if (is_virtual) {
337 337 switch (id) {
338 338 case vmIntrinsics::_hashCode:
339 339 case vmIntrinsics::_clone:
340 340 // OK, Object.hashCode and Object.clone intrinsics come in both flavors
341 341 break;
342 342 default:
343 343 return NULL;
344 344 }
345 345 }
346 346
347 347 // -XX:-InlineNatives disables nearly all intrinsics:
348 348 if (!InlineNatives) {
349 349 switch (id) {
350 350 case vmIntrinsics::_indexOf:
351 351 case vmIntrinsics::_compareTo:
352 352 case vmIntrinsics::_equals:
353 353 case vmIntrinsics::_equalsC:
354 354 case vmIntrinsics::_getAndAddInt:
355 355 case vmIntrinsics::_getAndAddLong:
356 356 case vmIntrinsics::_getAndSetInt:
357 357 case vmIntrinsics::_getAndSetLong:
358 358 case vmIntrinsics::_getAndSetObject:
359 359 case vmIntrinsics::_loadFence:
360 360 case vmIntrinsics::_storeFence:
361 361 case vmIntrinsics::_fullFence:
362 362 break; // InlineNatives does not control these String, Unsafe and fence intrinsics
363 363 case vmIntrinsics::_Reference_get:
364 364 break; // InlineNatives does not control Reference.get
365 365 default:
366 366 return NULL;
367 367 }
368 368 }
369 369
370 370 bool is_predicted = false;
371 371 bool does_virtual_dispatch = false;
372 372
373 373 switch (id) {
374 374 case vmIntrinsics::_compareTo:
375 375 if (!SpecialStringCompareTo) return NULL;
376 376 if (!Matcher::match_rule_supported(Op_StrComp)) return NULL;
377 377 break;
378 378 case vmIntrinsics::_indexOf:
379 379 if (!SpecialStringIndexOf) return NULL;
380 380 break;
381 381 case vmIntrinsics::_equals:
382 382 if (!SpecialStringEquals) return NULL;
383 383 if (!Matcher::match_rule_supported(Op_StrEquals)) return NULL;
384 384 break;
385 385 case vmIntrinsics::_equalsC:
386 386 if (!SpecialArraysEquals) return NULL;
387 387 if (!Matcher::match_rule_supported(Op_AryEq)) return NULL;
388 388 break;
389 389 case vmIntrinsics::_arraycopy:
390 390 if (!InlineArrayCopy) return NULL;
391 391 break;
392 392 case vmIntrinsics::_copyMemory:
393 393 if (StubRoutines::unsafe_arraycopy() == NULL) return NULL;
394 394 if (!InlineArrayCopy) return NULL;
395 395 break;
396 396 case vmIntrinsics::_hashCode:
397 397 if (!InlineObjectHash) return NULL;
398 398 does_virtual_dispatch = true;
399 399 break;
400 400 case vmIntrinsics::_clone:
401 401 does_virtual_dispatch = true; // fall through
402 402 case vmIntrinsics::_copyOf:
403 403 case vmIntrinsics::_copyOfRange:
404 404 if (!InlineObjectCopy) return NULL;
405 405 // These also use the arraycopy intrinsic mechanism:
406 406 if (!InlineArrayCopy) return NULL;
407 407 break;
408 408 case vmIntrinsics::_encodeISOArray:
409 409 if (!SpecialEncodeISOArray) return NULL;
410 410 if (!Matcher::match_rule_supported(Op_EncodeISOArray)) return NULL;
411 411 break;
412 412 case vmIntrinsics::_checkIndex:
413 413 // We do not intrinsify this. The optimizer does fine with it.
414 414 return NULL;
415 415
416 416 case vmIntrinsics::_getCallerClass:
417 417 if (!UseNewReflection) return NULL;
418 418 if (!InlineReflectionGetCallerClass) return NULL;
419 419 if (SystemDictionary::reflect_CallerSensitive_klass() == NULL) return NULL;
420 420 break;
421 421
422 422 case vmIntrinsics::_bitCount_i:
423 423 if (!Matcher::match_rule_supported(Op_PopCountI)) return NULL;
424 424 break;
425 425
426 426 case vmIntrinsics::_bitCount_l:
427 427 if (!Matcher::match_rule_supported(Op_PopCountL)) return NULL;
428 428 break;
429 429
430 430 case vmIntrinsics::_numberOfLeadingZeros_i:
431 431 if (!Matcher::match_rule_supported(Op_CountLeadingZerosI)) return NULL;
432 432 break;
433 433
434 434 case vmIntrinsics::_numberOfLeadingZeros_l:
435 435 if (!Matcher::match_rule_supported(Op_CountLeadingZerosL)) return NULL;
436 436 break;
437 437
438 438 case vmIntrinsics::_numberOfTrailingZeros_i:
439 439 if (!Matcher::match_rule_supported(Op_CountTrailingZerosI)) return NULL;
440 440 break;
441 441
442 442 case vmIntrinsics::_numberOfTrailingZeros_l:
443 443 if (!Matcher::match_rule_supported(Op_CountTrailingZerosL)) return NULL;
444 444 break;
445 445
446 446 case vmIntrinsics::_reverseBytes_c:
447 447 if (!Matcher::match_rule_supported(Op_ReverseBytesUS)) return NULL;
448 448 break;
449 449 case vmIntrinsics::_reverseBytes_s:
450 450 if (!Matcher::match_rule_supported(Op_ReverseBytesS)) return NULL;
451 451 break;
452 452 case vmIntrinsics::_reverseBytes_i:
453 453 if (!Matcher::match_rule_supported(Op_ReverseBytesI)) return NULL;
454 454 break;
455 455 case vmIntrinsics::_reverseBytes_l:
456 456 if (!Matcher::match_rule_supported(Op_ReverseBytesL)) return NULL;
457 457 break;
458 458
459 459 case vmIntrinsics::_Reference_get:
460 460 // Use the intrinsic version of Reference.get() so that the value in
461 461 // the referent field can be registered by the G1 pre-barrier code.
462 462 // Also add a memory barrier to prevent commoning reads from this field
463 463 // across safepoints, since GC can change its value.
464 464 break;
465 465
466 466 case vmIntrinsics::_compareAndSwapObject:
467 467 #ifdef _LP64
468 468 if (!UseCompressedOops && !Matcher::match_rule_supported(Op_CompareAndSwapP)) return NULL;
469 469 #endif
470 470 break;
471 471
472 472 case vmIntrinsics::_compareAndSwapLong:
473 473 if (!Matcher::match_rule_supported(Op_CompareAndSwapL)) return NULL;
474 474 break;
475 475
476 476 case vmIntrinsics::_getAndAddInt:
477 477 if (!Matcher::match_rule_supported(Op_GetAndAddI)) return NULL;
478 478 break;
479 479
480 480 case vmIntrinsics::_getAndAddLong:
481 481 if (!Matcher::match_rule_supported(Op_GetAndAddL)) return NULL;
482 482 break;
483 483
484 484 case vmIntrinsics::_getAndSetInt:
485 485 if (!Matcher::match_rule_supported(Op_GetAndSetI)) return NULL;
486 486 break;
487 487
488 488 case vmIntrinsics::_getAndSetLong:
489 489 if (!Matcher::match_rule_supported(Op_GetAndSetL)) return NULL;
490 490 break;
491 491
492 492 case vmIntrinsics::_getAndSetObject:
493 493 #ifdef _LP64
494 494 if (!UseCompressedOops && !Matcher::match_rule_supported(Op_GetAndSetP)) return NULL;
495 495 if (UseCompressedOops && !Matcher::match_rule_supported(Op_GetAndSetN)) return NULL;
496 496 break;
497 497 #else
498 498 if (!Matcher::match_rule_supported(Op_GetAndSetP)) return NULL;
499 499 break;
500 500 #endif
501 501
502 502 case vmIntrinsics::_aescrypt_encryptBlock:
503 503 case vmIntrinsics::_aescrypt_decryptBlock:
504 504 if (!UseAESIntrinsics) return NULL;
505 505 break;
506 506
507 507 case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
508 508 case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
509 509 if (!UseAESIntrinsics) return NULL;
510 510 // these two require the predicated logic
511 511 is_predicted = true;
512 512 break;
513 513
514 514 case vmIntrinsics::_updateCRC32:
515 515 case vmIntrinsics::_updateBytesCRC32:
516 516 case vmIntrinsics::_updateByteBufferCRC32:
517 517 if (!UseCRC32Intrinsics) return NULL;
518 518 break;
519 519
520 520 case vmIntrinsics::_incrementExactI:
521 521 case vmIntrinsics::_addExactI:
522 522 if (!Matcher::match_rule_supported(Op_OverflowAddI) || !UseMathExactIntrinsics) return NULL;
523 523 break;
524 524 case vmIntrinsics::_incrementExactL:
525 525 case vmIntrinsics::_addExactL:
526 526 if (!Matcher::match_rule_supported(Op_OverflowAddL) || !UseMathExactIntrinsics) return NULL;
527 527 break;
528 528 case vmIntrinsics::_decrementExactI:
529 529 case vmIntrinsics::_subtractExactI:
530 530 if (!Matcher::match_rule_supported(Op_OverflowSubI) || !UseMathExactIntrinsics) return NULL;
531 531 break;
532 532 case vmIntrinsics::_decrementExactL:
533 533 case vmIntrinsics::_subtractExactL:
534 534 if (!Matcher::match_rule_supported(Op_OverflowSubL) || !UseMathExactIntrinsics) return NULL;
535 535 break;
536 536 case vmIntrinsics::_negateExactI:
537 537 if (!Matcher::match_rule_supported(Op_OverflowSubI) || !UseMathExactIntrinsics) return NULL;
538 538 break;
539 539 case vmIntrinsics::_negateExactL:
540 540 if (!Matcher::match_rule_supported(Op_OverflowSubL) || !UseMathExactIntrinsics) return NULL;
541 541 break;
542 542 case vmIntrinsics::_multiplyExactI:
543 543 if (!Matcher::match_rule_supported(Op_OverflowMulI) || !UseMathExactIntrinsics) return NULL;
544 544 break;
545 545 case vmIntrinsics::_multiplyExactL:
546 546 if (!Matcher::match_rule_supported(Op_OverflowMulL) || !UseMathExactIntrinsics) return NULL;
547 547 break;
548 548
549 549 default:
550 550 assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
551 551 assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
552 552 break;
553 553 }
554 554
555 555 // -XX:-InlineClassNatives disables natives from the Class class.
556 556 // The flag applies to all reflective calls, notably Array.newArray
557 557 // (visible to Java programmers as Array.newInstance).
558 558 if (m->holder()->name() == ciSymbol::java_lang_Class() ||
559 559 m->holder()->name() == ciSymbol::java_lang_reflect_Array()) {
560 560 if (!InlineClassNatives) return NULL;
561 561 }
562 562
563 563 // -XX:-InlineThreadNatives disables natives from the Thread class.
564 564 if (m->holder()->name() == ciSymbol::java_lang_Thread()) {
565 565 if (!InlineThreadNatives) return NULL;
566 566 }
567 567
568 568 // -XX:-InlineMathNatives disables natives from the Math, Float and Double classes.
569 569 if (m->holder()->name() == ciSymbol::java_lang_Math() ||
570 570 m->holder()->name() == ciSymbol::java_lang_Float() ||
571 571 m->holder()->name() == ciSymbol::java_lang_Double()) {
572 572 if (!InlineMathNatives) return NULL;
573 573 }
574 574
575 575 // -XX:-InlineUnsafeOps disables natives from the Unsafe class.
576 576 if (m->holder()->name() == ciSymbol::sun_misc_Unsafe()) {
577 577 if (!InlineUnsafeOps) return NULL;
578 578 }
579 579
580 580 return new LibraryIntrinsic(m, is_virtual, is_predicted, does_virtual_dispatch, (vmIntrinsics::ID) id);
581 581 }
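// Callers do not normally invoke make_vm_intrinsic() directly; they go
// through Compile::find_intrinsic(), which caches the resulting
// CallGenerator per (method, is_virtual) pair.  A hedged sketch of a call
// site (names follow this file; the real one lives in the call-generation
// code):
//
//   CallGenerator* cg = C->find_intrinsic(callee, call_does_dispatch);
//   if (cg != NULL) {
//     return cg;   // compile the call site as an intrinsic
//   }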
582 582
583 583 //----------------------register_library_intrinsics-----------------------
584 584 // Initialize this file's data structures, for each Compile instance.
585 585 void Compile::register_library_intrinsics() {
586 586 // Nothing to do here.
587 587 }
588 588
589 589 JVMState* LibraryIntrinsic::generate(JVMState* jvms, Parse* parent_parser) {
590 590 LibraryCallKit kit(jvms, this);
591 591 Compile* C = kit.C;
592 592 int nodes = C->unique();
593 593 #ifndef PRODUCT
594 594 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
595 595 char buf[1000];
596 596 const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
597 597 tty->print_cr("Intrinsic %s", str);
598 598 }
599 599 #endif
600 600 ciMethod* callee = kit.callee();
601 601 const int bci = kit.bci();
602 602
603 603 // Try to inline the intrinsic.
604 604 if (kit.try_to_inline()) {
605 605 if (C->print_intrinsics() || C->print_inlining()) {
606 606 C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
607 607 }
608 608 C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
609 609 if (C->log()) {
610 610 C->log()->elem("intrinsic id='%s'%s nodes='%d'",
611 611 vmIntrinsics::name_at(intrinsic_id()),
612 612 (is_virtual() ? " virtual='1'" : ""),
613 613 C->unique() - nodes);
614 614 }
615 615 // Push the result from the inlined method onto the stack.
616 616 kit.push_result();
617 617 return kit.transfer_exceptions_into_jvms();
618 618 }
619 619
620 620 // The intrinsic bailed out
621 621 if (C->print_intrinsics() || C->print_inlining()) {
622 622 if (jvms->has_method()) {
623 623 // Not a root compile.
624 624 const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
625 625 C->print_inlining(callee, jvms->depth() - 1, bci, msg);
626 626 } else {
627 627 // Root compile
628 628 tty->print("Did not generate intrinsic %s%s at bci:%d in",
629 629 vmIntrinsics::name_at(intrinsic_id()),
630 630 (is_virtual() ? " (virtual)" : ""), bci);
631 631 }
632 632 }
633 633 C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
634 634 return NULL;
635 635 }
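// Note: returning NULL from generate() signals that the intrinsic bailed
// out; the parser then falls back to compiling the call site as an ordinary
// (non-intrinsic) call.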
636 636
637 637 Node* LibraryIntrinsic::generate_predicate(JVMState* jvms) {
638 638 LibraryCallKit kit(jvms, this);
639 639 Compile* C = kit.C;
640 640 int nodes = C->unique();
641 641 #ifndef PRODUCT
642 642 assert(is_predicted(), "sanity");
643 643 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
644 644 char buf[1000];
645 645 const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
646 646 tty->print_cr("Predicate for intrinsic %s", str);
647 647 }
648 648 #endif
649 649 ciMethod* callee = kit.callee();
650 650 const int bci = kit.bci();
651 651
652 652 Node* slow_ctl = kit.try_to_predicate();
653 653 if (!kit.failing()) {
654 654 if (C->print_intrinsics() || C->print_inlining()) {
655 655 C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
656 656 }
657 657 C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
658 658 if (C->log()) {
659 659 C->log()->elem("predicate_intrinsic id='%s'%s nodes='%d'",
660 660 vmIntrinsics::name_at(intrinsic_id()),
661 661 (is_virtual() ? " virtual='1'" : ""),
662 662 C->unique() - nodes);
663 663 }
664 664 return slow_ctl; // Could be NULL if the check folds.
665 665 }
666 666
667 667 // The intrinsic bailed out
668 668 if (C->print_intrinsics() || C->print_inlining()) {
669 669 if (jvms->has_method()) {
670 670 // Not a root compile.
671 671 const char* msg = "failed to generate predicate for intrinsic";
672 672 C->print_inlining(kit.callee(), jvms->depth() - 1, bci, msg);
673 673 } else {
674 674 // Root compile
675 675 C->print_inlining_stream()->print("Did not generate predicate for intrinsic %s%s at bci:%d in",
676 676 vmIntrinsics::name_at(intrinsic_id()),
677 677 (is_virtual() ? " (virtual)" : ""), bci);
678 678 }
679 679 }
680 680 C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
681 681 return NULL;
682 682 }
683 683
684 684 bool LibraryCallKit::try_to_inline() {
685 685 // Handle symbolic names for otherwise undistinguished boolean switches:
686 686 const bool is_store = true;
687 687 const bool is_native_ptr = true;
688 688 const bool is_static = true;
689 689 const bool is_volatile = true;
690 690
691 691 if (!jvms()->has_method()) {
692 692 // Root JVMState has a null method.
693 693 assert(map()->memory()->Opcode() == Op_Parm, "");
694 694 // Insert the memory aliasing node
695 695 set_all_memory(reset_memory());
696 696 }
697 697 assert(merged_memory(), "");
698 698
699 699
700 700 switch (intrinsic_id()) {
701 701 case vmIntrinsics::_hashCode: return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
702 702 case vmIntrinsics::_identityHashCode: return inline_native_hashcode(/*!virtual*/ false, is_static);
703 703 case vmIntrinsics::_getClass: return inline_native_getClass();
704 704
705 705 case vmIntrinsics::_dsin:
706 706 case vmIntrinsics::_dcos:
707 707 case vmIntrinsics::_dtan:
708 708 case vmIntrinsics::_dabs:
709 709 case vmIntrinsics::_datan2:
710 710 case vmIntrinsics::_dsqrt:
711 711 case vmIntrinsics::_dexp:
712 712 case vmIntrinsics::_dlog:
713 713 case vmIntrinsics::_dlog10:
714 714 case vmIntrinsics::_dpow: return inline_math_native(intrinsic_id());
715 715
716 716 case vmIntrinsics::_min:
717 717 case vmIntrinsics::_max: return inline_min_max(intrinsic_id());
718 718
719 719 case vmIntrinsics::_addExactI: return inline_math_addExactI(false /* add */);
720 720 case vmIntrinsics::_addExactL: return inline_math_addExactL(false /* add */);
721 721 case vmIntrinsics::_decrementExactI: return inline_math_subtractExactI(true /* decrement */);
722 722 case vmIntrinsics::_decrementExactL: return inline_math_subtractExactL(true /* decrement */);
723 723 case vmIntrinsics::_incrementExactI: return inline_math_addExactI(true /* increment */);
724 724 case vmIntrinsics::_incrementExactL: return inline_math_addExactL(true /* increment */);
725 725 case vmIntrinsics::_multiplyExactI: return inline_math_multiplyExactI();
726 726 case vmIntrinsics::_multiplyExactL: return inline_math_multiplyExactL();
727 727 case vmIntrinsics::_negateExactI: return inline_math_negateExactI();
728 728 case vmIntrinsics::_negateExactL: return inline_math_negateExactL();
729 729 case vmIntrinsics::_subtractExactI: return inline_math_subtractExactI(false /* subtract */);
730 730 case vmIntrinsics::_subtractExactL: return inline_math_subtractExactL(false /* subtract */);
731 731
732 732 case vmIntrinsics::_arraycopy: return inline_arraycopy();
733 733
734 734 case vmIntrinsics::_compareTo: return inline_string_compareTo();
735 735 case vmIntrinsics::_indexOf: return inline_string_indexOf();
736 736 case vmIntrinsics::_equals: return inline_string_equals();
737 737
738 738 case vmIntrinsics::_getObject: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, !is_volatile);
739 739 case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, !is_volatile);
740 740 case vmIntrinsics::_getByte: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, !is_volatile);
741 741 case vmIntrinsics::_getShort: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, !is_volatile);
742 742 case vmIntrinsics::_getChar: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, !is_volatile);
743 743 case vmIntrinsics::_getInt: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, !is_volatile);
744 744 case vmIntrinsics::_getLong: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, !is_volatile);
745 745 case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, !is_volatile);
746 746 case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, !is_volatile);
747 747
748 748 case vmIntrinsics::_putObject: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, !is_volatile);
749 749 case vmIntrinsics::_putBoolean: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, !is_volatile);
750 750 case vmIntrinsics::_putByte: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, !is_volatile);
751 751 case vmIntrinsics::_putShort: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, !is_volatile);
752 752 case vmIntrinsics::_putChar: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, !is_volatile);
753 753 case vmIntrinsics::_putInt: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, !is_volatile);
754 754 case vmIntrinsics::_putLong: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, !is_volatile);
755 755 case vmIntrinsics::_putFloat: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, !is_volatile);
756 756 case vmIntrinsics::_putDouble: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, !is_volatile);
757 757
758 758 case vmIntrinsics::_getByte_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_BYTE, !is_volatile);
759 759 case vmIntrinsics::_getShort_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_SHORT, !is_volatile);
760 760 case vmIntrinsics::_getChar_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_CHAR, !is_volatile);
761 761 case vmIntrinsics::_getInt_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_INT, !is_volatile);
762 762 case vmIntrinsics::_getLong_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_LONG, !is_volatile);
763 763 case vmIntrinsics::_getFloat_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_FLOAT, !is_volatile);
764 764 case vmIntrinsics::_getDouble_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_DOUBLE, !is_volatile);
765 765 case vmIntrinsics::_getAddress_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_ADDRESS, !is_volatile);
766 766
767 767 case vmIntrinsics::_putByte_raw: return inline_unsafe_access( is_native_ptr, is_store, T_BYTE, !is_volatile);
768 768 case vmIntrinsics::_putShort_raw: return inline_unsafe_access( is_native_ptr, is_store, T_SHORT, !is_volatile);
769 769 case vmIntrinsics::_putChar_raw: return inline_unsafe_access( is_native_ptr, is_store, T_CHAR, !is_volatile);
770 770 case vmIntrinsics::_putInt_raw: return inline_unsafe_access( is_native_ptr, is_store, T_INT, !is_volatile);
771 771 case vmIntrinsics::_putLong_raw: return inline_unsafe_access( is_native_ptr, is_store, T_LONG, !is_volatile);
772 772 case vmIntrinsics::_putFloat_raw: return inline_unsafe_access( is_native_ptr, is_store, T_FLOAT, !is_volatile);
773 773 case vmIntrinsics::_putDouble_raw: return inline_unsafe_access( is_native_ptr, is_store, T_DOUBLE, !is_volatile);
774 774 case vmIntrinsics::_putAddress_raw: return inline_unsafe_access( is_native_ptr, is_store, T_ADDRESS, !is_volatile);
775 775
776 776 case vmIntrinsics::_getObjectVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, is_volatile);
777 777 case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, is_volatile);
778 778 case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, is_volatile);
779 779 case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, is_volatile);
780 780 case vmIntrinsics::_getCharVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, is_volatile);
781 781 case vmIntrinsics::_getIntVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, is_volatile);
782 782 case vmIntrinsics::_getLongVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, is_volatile);
783 783 case vmIntrinsics::_getFloatVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, is_volatile);
784 784 case vmIntrinsics::_getDoubleVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, is_volatile);
785 785
786 786 case vmIntrinsics::_putObjectVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, is_volatile);
787 787 case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, is_volatile);
788 788 case vmIntrinsics::_putByteVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, is_volatile);
789 789 case vmIntrinsics::_putShortVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, is_volatile);
790 790 case vmIntrinsics::_putCharVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, is_volatile);
791 791 case vmIntrinsics::_putIntVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, is_volatile);
792 792 case vmIntrinsics::_putLongVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, is_volatile);
793 793 case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, is_volatile);
794 794 case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, is_volatile);
795 795
796 796 case vmIntrinsics::_prefetchRead: return inline_unsafe_prefetch(!is_native_ptr, !is_store, !is_static);
797 797 case vmIntrinsics::_prefetchWrite: return inline_unsafe_prefetch(!is_native_ptr, is_store, !is_static);
798 798 case vmIntrinsics::_prefetchReadStatic: return inline_unsafe_prefetch(!is_native_ptr, !is_store, is_static);
799 799 case vmIntrinsics::_prefetchWriteStatic: return inline_unsafe_prefetch(!is_native_ptr, is_store, is_static);
800 800
801 801 case vmIntrinsics::_compareAndSwapObject: return inline_unsafe_load_store(T_OBJECT, LS_cmpxchg);
802 802 case vmIntrinsics::_compareAndSwapInt: return inline_unsafe_load_store(T_INT, LS_cmpxchg);
803 803 case vmIntrinsics::_compareAndSwapLong: return inline_unsafe_load_store(T_LONG, LS_cmpxchg);
804 804
805 805 case vmIntrinsics::_putOrderedObject: return inline_unsafe_ordered_store(T_OBJECT);
806 806 case vmIntrinsics::_putOrderedInt: return inline_unsafe_ordered_store(T_INT);
807 807 case vmIntrinsics::_putOrderedLong: return inline_unsafe_ordered_store(T_LONG);
808 808
809 809 case vmIntrinsics::_getAndAddInt: return inline_unsafe_load_store(T_INT, LS_xadd);
810 810 case vmIntrinsics::_getAndAddLong: return inline_unsafe_load_store(T_LONG, LS_xadd);
811 811 case vmIntrinsics::_getAndSetInt: return inline_unsafe_load_store(T_INT, LS_xchg);
812 812 case vmIntrinsics::_getAndSetLong: return inline_unsafe_load_store(T_LONG, LS_xchg);
813 813 case vmIntrinsics::_getAndSetObject: return inline_unsafe_load_store(T_OBJECT, LS_xchg);
814 814
815 815 case vmIntrinsics::_loadFence:
816 816 case vmIntrinsics::_storeFence:
817 817 case vmIntrinsics::_fullFence: return inline_unsafe_fence(intrinsic_id());
818 818
819 819 case vmIntrinsics::_currentThread: return inline_native_currentThread();
820 820 case vmIntrinsics::_isInterrupted: return inline_native_isInterrupted();
821 821
822 822 #ifdef TRACE_HAVE_INTRINSICS
823 823 case vmIntrinsics::_classID: return inline_native_classID();
824 824 case vmIntrinsics::_threadID: return inline_native_threadID();
825 825 case vmIntrinsics::_counterTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), "counterTime");
826 826 #endif
827 827 case vmIntrinsics::_currentTimeMillis: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
828 828 case vmIntrinsics::_nanoTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
829 829 case vmIntrinsics::_allocateInstance: return inline_unsafe_allocate();
830 830 case vmIntrinsics::_copyMemory: return inline_unsafe_copyMemory();
831 831 case vmIntrinsics::_newArray: return inline_native_newArray();
832 832 case vmIntrinsics::_getLength: return inline_native_getLength();
833 833 case vmIntrinsics::_copyOf: return inline_array_copyOf(false);
834 834 case vmIntrinsics::_copyOfRange: return inline_array_copyOf(true);
835 835 case vmIntrinsics::_equalsC: return inline_array_equals();
836 836 case vmIntrinsics::_clone: return inline_native_clone(intrinsic()->is_virtual());
837 837
838 838 case vmIntrinsics::_isAssignableFrom: return inline_native_subtype_check();
839 839
840 840 case vmIntrinsics::_isInstance:
841 841 case vmIntrinsics::_getModifiers:
842 842 case vmIntrinsics::_isInterface:
843 843 case vmIntrinsics::_isArray:
844 844 case vmIntrinsics::_isPrimitive:
845 845 case vmIntrinsics::_getSuperclass:
846 846 case vmIntrinsics::_getComponentType:
847 847 case vmIntrinsics::_getClassAccessFlags: return inline_native_Class_query(intrinsic_id());
848 848
849 849 case vmIntrinsics::_floatToRawIntBits:
850 850 case vmIntrinsics::_floatToIntBits:
851 851 case vmIntrinsics::_intBitsToFloat:
852 852 case vmIntrinsics::_doubleToRawLongBits:
853 853 case vmIntrinsics::_doubleToLongBits:
854 854 case vmIntrinsics::_longBitsToDouble: return inline_fp_conversions(intrinsic_id());
855 855
856 856 case vmIntrinsics::_numberOfLeadingZeros_i:
857 857 case vmIntrinsics::_numberOfLeadingZeros_l:
858 858 case vmIntrinsics::_numberOfTrailingZeros_i:
859 859 case vmIntrinsics::_numberOfTrailingZeros_l:
860 860 case vmIntrinsics::_bitCount_i:
861 861 case vmIntrinsics::_bitCount_l:
862 862 case vmIntrinsics::_reverseBytes_i:
863 863 case vmIntrinsics::_reverseBytes_l:
864 864 case vmIntrinsics::_reverseBytes_s:
865 865 case vmIntrinsics::_reverseBytes_c: return inline_number_methods(intrinsic_id());
866 866
867 867 case vmIntrinsics::_getCallerClass: return inline_native_Reflection_getCallerClass();
868 868
869 869 case vmIntrinsics::_Reference_get: return inline_reference_get();
870 870
871 871 case vmIntrinsics::_aescrypt_encryptBlock:
872 872 case vmIntrinsics::_aescrypt_decryptBlock: return inline_aescrypt_Block(intrinsic_id());
873 873
874 874 case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
875 875 case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
876 876 return inline_cipherBlockChaining_AESCrypt(intrinsic_id());
877 877
878 878 case vmIntrinsics::_encodeISOArray:
879 879 return inline_encodeISOArray();
880 880
881 881 case vmIntrinsics::_updateCRC32:
882 882 return inline_updateCRC32();
883 883 case vmIntrinsics::_updateBytesCRC32:
884 884 return inline_updateBytesCRC32();
885 885 case vmIntrinsics::_updateByteBufferCRC32:
886 886 return inline_updateByteBufferCRC32();
887 887
888 888 default:
889 889 // If you get here, it may be that someone has added a new intrinsic
890 890 // to the list in vmSymbols.hpp without implementing it here.
891 891 #ifndef PRODUCT
892 892 if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
893 893 tty->print_cr("*** Warning: Unimplemented intrinsic %s(%d)",
894 894 vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
895 895 }
896 896 #endif
897 897 return false;
898 898 }
899 899 }
900 900
901 901 Node* LibraryCallKit::try_to_predicate() {
902 902 if (!jvms()->has_method()) {
903 903 // Root JVMState has a null method.
904 904 assert(map()->memory()->Opcode() == Op_Parm, "");
905 905 // Insert the memory aliasing node
906 906 set_all_memory(reset_memory());
907 907 }
908 908 assert(merged_memory(), "");
909 909
910 910 switch (intrinsic_id()) {
911 911 case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
912 912 return inline_cipherBlockChaining_AESCrypt_predicate(false);
913 913 case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
914 914 return inline_cipherBlockChaining_AESCrypt_predicate(true);
915 915
916 916 default:
917 917 // If you get here, it may be that someone has added a new intrinsic
918 918 // to the list in vmSymbols.hpp without implementing it here.
919 919 #ifndef PRODUCT
920 920 if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
921 921 tty->print_cr("*** Warning: Unimplemented predicate for intrinsic %s(%d)",
922 922 vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
923 923 }
924 924 #endif
925 925 Node* slow_ctl = control();
926 926 set_control(top()); // No fast path intrinsic
927 927 return slow_ctl;
928 928 }
929 929 }
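// For predicated intrinsics the compiler first emits the test built by
// try_to_predicate(); the control edge returned above is the path on which
// the fast-path assumptions do not hold and a regular call must be made.
// The default branch sets control to top() and hands back the incoming
// control, which makes an unimplemented predicate take the slow path
// unconditionally.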
930 930
931 931 //------------------------------set_result-------------------------------
932 932 // Helper function for finishing intrinsics.
933 933 void LibraryCallKit::set_result(RegionNode* region, PhiNode* value) {
934 934 record_for_igvn(region);
935 935 set_control(_gvn.transform(region));
936 936 set_result( _gvn.transform(value));
937 937 assert(value->type()->basic_type() == result()->bottom_type()->basic_type(), "sanity");
938 938 }
939 939
940 940 //------------------------------generate_guard---------------------------
941 941 // Helper function for generating guarded fast-slow graph structures.
942 942 // The given 'test', if true, guards a slow path. If the test fails
943 943 // then a fast path can be taken. (We generally hope it fails.)
944 944 // In all cases, GraphKit::control() is updated to the fast path.
945 945 // The returned value represents the control for the slow path.
946 946 // The return value is never 'top'; it is either a valid control
947 947 // or NULL if it is obvious that the slow path can never be taken.
948 948 // Also, if region and the slow control are not NULL, the slow edge
949 949 // is appended to the region.
950 950 Node* LibraryCallKit::generate_guard(Node* test, RegionNode* region, float true_prob) {
951 951 if (stopped()) {
952 952 // Already short circuited.
953 953 return NULL;
954 954 }
955 955
956 956 // Build an if node and its projections.
957 957 // If test is true we take the slow path, which we assume is uncommon.
958 958 if (_gvn.type(test) == TypeInt::ZERO) {
959 959 // The slow branch is never taken. No need to build this guard.
960 960 return NULL;
961 961 }
962 962
963 963 IfNode* iff = create_and_map_if(control(), test, true_prob, COUNT_UNKNOWN);
964 964
965 965 Node* if_slow = _gvn.transform(new (C) IfTrueNode(iff));
966 966 if (if_slow == top()) {
967 967 // The slow branch is never taken. No need to build this guard.
968 968 return NULL;
969 969 }
970 970
971 971 if (region != NULL)
972 972 region->add_req(if_slow);
973 973
974 974 Node* if_fast = _gvn.transform(new (C) IfFalseNode(iff));
975 975 set_control(if_fast);
976 976
977 977 return if_slow;
978 978 }
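// Typical use, as in generate_negative_guard() below: build a Bool for the
// exceptional condition and guard it with a low probability, then keep
// emitting the fast path on the updated control:
//
//   Node* cmp_lt = _gvn.transform(new (C) CmpINode(index, intcon(0)));
//   Node* bol_lt = _gvn.transform(new (C) BoolNode(cmp_lt, BoolTest::lt));
//   Node* is_neg = generate_guard(bol_lt, region, PROB_MIN);
//   // control() now describes the index >= 0 path; is_neg is the slow edge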
979 979
980 980 inline Node* LibraryCallKit::generate_slow_guard(Node* test, RegionNode* region) {
981 981 return generate_guard(test, region, PROB_UNLIKELY_MAG(3));
982 982 }
983 983 inline Node* LibraryCallKit::generate_fair_guard(Node* test, RegionNode* region) {
984 984 return generate_guard(test, region, PROB_FAIR);
985 985 }
986 986
987 987 inline Node* LibraryCallKit::generate_negative_guard(Node* index, RegionNode* region,
988 988 Node* *pos_index) {
989 989 if (stopped())
990 990 return NULL; // already stopped
991 991 if (_gvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
992 992 return NULL; // index is already adequately typed
993 993 Node* cmp_lt = _gvn.transform(new (C) CmpINode(index, intcon(0)));
994 994 Node* bol_lt = _gvn.transform(new (C) BoolNode(cmp_lt, BoolTest::lt));
995 995 Node* is_neg = generate_guard(bol_lt, region, PROB_MIN);
996 996 if (is_neg != NULL && pos_index != NULL) {
997 997 // Emulate effect of Parse::adjust_map_after_if.
998 998 Node* ccast = new (C) CastIINode(index, TypeInt::POS);
999 999 ccast->set_req(0, control());
1000 1000 (*pos_index) = _gvn.transform(ccast);
1001 1001 }
1002 1002 return is_neg;
1003 1003 }
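// The CastIINode above narrows the index to TypeInt::POS on the fast path,
// mirroring what Parse::adjust_map_after_if does for an explicit bytecode
// test; setting its control input pins the cast below the guard so it
// cannot float above the comparison.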
1004 1004
1005 1005 inline Node* LibraryCallKit::generate_nonpositive_guard(Node* index, bool never_negative,
1006 1006 Node* *pos_index) {
1007 1007 if (stopped())
1008 1008 return NULL; // already stopped
1009 1009 if (_gvn.type(index)->higher_equal(TypeInt::POS1)) // [1,maxint]
1010 1010 return NULL; // index is already adequately typed
1011 1011 Node* cmp_le = _gvn.transform(new (C) CmpINode(index, intcon(0)));
1012 1012 BoolTest::mask le_or_eq = (never_negative ? BoolTest::eq : BoolTest::le);
1013 1013 Node* bol_le = _gvn.transform(new (C) BoolNode(cmp_le, le_or_eq));
1014 1014 Node* is_notp = generate_guard(bol_le, NULL, PROB_MIN);
1015 1015 if (is_notp != NULL && pos_index != NULL) {
1016 1016 // Emulate effect of Parse::adjust_map_after_if.
1017 1017 Node* ccast = new (C) CastIINode(index, TypeInt::POS1);
1018 1018 ccast->set_req(0, control());
1019 1019 (*pos_index) = _gvn.transform(ccast);
1020 1020 }
1021 1021 return is_notp;
1022 1022 }
1023 1023
1024 1024 // Make sure that 'position' is a valid limit index, in [0..length].
1025 1025 // There are two equivalent plans for checking this:
1026 1026 // A. (offset + copyLength) unsigned<= arrayLength
1027 1027 // B. offset <= (arrayLength - copyLength)
1028 1028 // We require that all of the values above, except for the sum and
1029 1029 // difference, are already known to be non-negative.
1030 1030 // Plan A is robust in the face of overflow, if offset and copyLength
1031 1031 // are both hugely positive.
1032 1032 //
1033 1033 // Plan B is less direct and intuitive, but it does not overflow at
1034 1034 // all, since the difference of two non-negatives is always
1035 1035 // representable. Whenever Java methods must perform the equivalent
1036 1036 // check they generally use Plan B instead of Plan A.
1037 1037 // For the moment we use Plan A.
1038 1038 inline Node* LibraryCallKit::generate_limit_guard(Node* offset,
1039 1039 Node* subseq_length,
1040 1040 Node* array_length,
1041 1041 RegionNode* region) {
1042 1042 if (stopped())
1043 1043 return NULL; // already stopped
1044 1044 bool zero_offset = _gvn.type(offset) == TypeInt::ZERO;
1045 1045 if (zero_offset && subseq_length->eqv_uncast(array_length))
1046 1046 return NULL; // common case of whole-array copy
1047 1047 Node* last = subseq_length;
1048 1048 if (!zero_offset) // last += offset
1049 1049 last = _gvn.transform(new (C) AddINode(last, offset));
1050 1050 Node* cmp_lt = _gvn.transform(new (C) CmpUNode(array_length, last));
1051 1051 Node* bol_lt = _gvn.transform(new (C) BoolNode(cmp_lt, BoolTest::lt));
1052 1052 Node* is_over = generate_guard(bol_lt, region, PROB_MIN);
1053 1053 return is_over;
1054 1054 }
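// Worked example of why Plan A needs the unsigned compare (CmpU above):
// with arrayLength == 10, offset == 0x7FFFFFFF and copyLength == 2 the
// 32-bit sum wraps to 0x80000001.  A signed compare would see a negative
// value and wrongly accept it; as an unsigned value it is far larger than
// arrayLength, so the guard correctly routes to the slow path.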
1055 1055
1056 1056
1057 1057 //--------------------------generate_current_thread--------------------
1058 1058 Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
1059 1059 ciKlass* thread_klass = env()->Thread_klass();
1060 1060 const Type* thread_type = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
1061 1061 Node* thread = _gvn.transform(new (C) ThreadLocalNode());
1062 1062 Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
1063 1063 Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT, MemNode::unordered);
1064 1064 tls_output = thread;
1065 1065 return threadObj;
1066 1066 }
1067 1067
1068 1068
1069 1069 //------------------------------make_string_method_node------------------------
1070 1070 // Helper method for String intrinsic functions. This version is called
1071 1071 // with str1 and str2 pointing to String object nodes.
1072 1072 //
1073 1073 Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1, Node* str2) {
1074 1074 Node* no_ctrl = NULL;
1075 1075
1076 1076 // Get start addr of string
1077 1077 Node* str1_value = load_String_value(no_ctrl, str1);
1078 1078 Node* str1_offset = load_String_offset(no_ctrl, str1);
1079 1079 Node* str1_start = array_element_address(str1_value, str1_offset, T_CHAR);
1080 1080
1081 1081 // Get length of string 1
1082 1082 Node* str1_len = load_String_length(no_ctrl, str1);
1083 1083
1084 1084 Node* str2_value = load_String_value(no_ctrl, str2);
1085 1085 Node* str2_offset = load_String_offset(no_ctrl, str2);
1086 1086 Node* str2_start = array_element_address(str2_value, str2_offset, T_CHAR);
1087 1087
1088 1088 Node* str2_len = NULL;
1089 1089 Node* result = NULL;
1090 1090
1091 1091 switch (opcode) {
1092 1092 case Op_StrIndexOf:
1093 1093 // Get length of string 2
1094 1094 str2_len = load_String_length(no_ctrl, str2);
1095 1095
1096 1096 result = new (C) StrIndexOfNode(control(), memory(TypeAryPtr::CHARS),
1097 1097 str1_start, str1_len, str2_start, str2_len);
1098 1098 break;
1099 1099 case Op_StrComp:
1100 1100 // Get length of string 2
1101 1101 str2_len = load_String_length(no_ctrl, str2);
1102 1102
1103 1103 result = new (C) StrCompNode(control(), memory(TypeAryPtr::CHARS),
1104 1104 str1_start, str1_len, str2_start, str2_len);
1105 1105 break;
1106 1106 case Op_StrEquals:
1107 1107 result = new (C) StrEqualsNode(control(), memory(TypeAryPtr::CHARS),
1108 1108 str1_start, str2_start, str1_len);
1109 1109 break;
1110 1110 default:
1111 1111 ShouldNotReachHere();
1112 1112 return NULL;
1113 1113 }
1114 1114
1115 1115 // All these intrinsics have checks.
1116 1116 C->set_has_split_ifs(true); // Has chance for split-if optimization
1117 1117
1118 1118 return _gvn.transform(result);
1119 1119 }
1120 1120
1121 1121 // Helper method for String intrinsic functions. This version is called
1122 1122 // with str1 and str2 pointing to char[] nodes, with cnt1 and cnt2 pointing
1123 1123 // to Int nodes containing the lengths of str1 and str2.
1124 1124 //
1125 1125 Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2) {
1126 1126 Node* result = NULL;
1127 1127 switch (opcode) {
1128 1128 case Op_StrIndexOf:
1129 1129 result = new (C) StrIndexOfNode(control(), memory(TypeAryPtr::CHARS),
1130 1130 str1_start, cnt1, str2_start, cnt2);
1131 1131 break;
1132 1132 case Op_StrComp:
1133 1133 result = new (C) StrCompNode(control(), memory(TypeAryPtr::CHARS),
1134 1134 str1_start, cnt1, str2_start, cnt2);
1135 1135 break;
1136 1136 case Op_StrEquals:
1137 1137 result = new (C) StrEqualsNode(control(), memory(TypeAryPtr::CHARS),
1138 1138 str1_start, str2_start, cnt1);
1139 1139 break;
1140 1140 default:
1141 1141 ShouldNotReachHere();
1142 1142 return NULL;
1143 1143 }
1144 1144
1145 1145 // All these intrinsics have checks.
1146 1146 C->set_has_split_ifs(true); // Has chance for split-if optimization
1147 1147
1148 1148 return _gvn.transform(result);
1149 1149 }
1150 1150
1151 1151 //------------------------------inline_string_compareTo------------------------
1152 1152 // public int java.lang.String.compareTo(String anotherString);
1153 1153 bool LibraryCallKit::inline_string_compareTo() {
1154 1154 Node* receiver = null_check(argument(0));
1155 1155 Node* arg = null_check(argument(1));
1156 1156 if (stopped()) {
1157 1157 return true;
1158 1158 }
1159 1159 set_result(make_string_method_node(Op_StrComp, receiver, arg));
1160 1160 return true;
1161 1161 }
1162 1162
1163 1163 //------------------------------inline_string_equals------------------------
1164 1164 bool LibraryCallKit::inline_string_equals() {
1165 1165 Node* receiver = null_check_receiver();
1166 1166 // NOTE: Do not null check the argument for String.equals(); the spec
1167 1167 // allows NULL as an argument.
1168 1168 Node* argument = this->argument(1);
1169 1169 if (stopped()) {
1170 1170 return true;
1171 1171 }
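// The graph built below corresponds roughly to this Java logic
// (an illustrative sketch, not literal library code):
//
//   if (receiver == argument) return true;           // same reference
//   if (!(argument instanceof String)) return false;
//   String s = (String) argument;
//   if (receiver.count != s.count) return false;
//   return StrEquals(receiver.value, s.value, receiver.count);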
1172 1172
1173 1173 // paths (plus control) merge
1174 1174 RegionNode* region = new (C) RegionNode(5);
1175 1175 Node* phi = new (C) PhiNode(region, TypeInt::BOOL);
1176 1176
1177 1177 // does source == target string?
1178 1178 Node* cmp = _gvn.transform(new (C) CmpPNode(receiver, argument));
1179 1179 Node* bol = _gvn.transform(new (C) BoolNode(cmp, BoolTest::eq));
1180 1180
1181 1181 Node* if_eq = generate_slow_guard(bol, NULL);
1182 1182 if (if_eq != NULL) {
1183 1183 // receiver == argument
1184 1184 phi->init_req(2, intcon(1));
1185 1185 region->init_req(2, if_eq);
1186 1186 }
1187 1187
1188 1188 // get String klass for instanceOf
1189 1189 ciInstanceKlass* klass = env()->String_klass();
1190 1190
1191 1191 if (!stopped()) {
1192 1192 Node* inst = gen_instanceof(argument, makecon(TypeKlassPtr::make(klass)));
1193 1193 Node* cmp = _gvn.transform(new (C) CmpINode(inst, intcon(1)));
1194 1194 Node* bol = _gvn.transform(new (C) BoolNode(cmp, BoolTest::ne));
1195 1195
1196 1196 Node* inst_false = generate_guard(bol, NULL, PROB_MIN);
1197 1197 // instanceOf == true, fall through
1198 1198
1199 1199 if (inst_false != NULL) {
1200 1200 phi->init_req(3, intcon(0));
1201 1201 region->init_req(3, inst_false);
1202 1202 }
1203 1203 }
1204 1204
1205 1205 if (!stopped()) {
1206 1206 const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(klass);
1207 1207
1208 1208 // Properly cast the argument to String
1209 1209 argument = _gvn.transform(new (C) CheckCastPPNode(control(), argument, string_type));
1210 1210 // This path is taken only when argument's type is String:NotNull.
1211 1211 argument = cast_not_null(argument, false);
1212 1212
1213 1213 Node* no_ctrl = NULL;
1214 1214
1215 1215 // Get start addr of receiver
1216 1216 Node* receiver_val = load_String_value(no_ctrl, receiver);
1217 1217 Node* receiver_offset = load_String_offset(no_ctrl, receiver);
1218 1218 Node* receiver_start = array_element_address(receiver_val, receiver_offset, T_CHAR);
1219 1219
1220 1220 // Get length of receiver
1221 1221 Node* receiver_cnt = load_String_length(no_ctrl, receiver);
1222 1222
1223 1223 // Get start addr of argument
1224 1224 Node* argument_val = load_String_value(no_ctrl, argument);
1225 1225 Node* argument_offset = load_String_offset(no_ctrl, argument);
1226 1226 Node* argument_start = array_element_address(argument_val, argument_offset, T_CHAR);
1227 1227
1228 1228 // Get length of argument
1229 1229 Node* argument_cnt = load_String_length(no_ctrl, argument);
1230 1230
1231 1231 // Check for receiver count != argument count
1232 1232 Node* cmp = _gvn.transform(new(C) CmpINode(receiver_cnt, argument_cnt));
1233 1233 Node* bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::ne));
1234 1234 Node* if_ne = generate_slow_guard(bol, NULL);
1235 1235 if (if_ne != NULL) {
1236 1236 phi->init_req(4, intcon(0));
1237 1237 region->init_req(4, if_ne);
1238 1238 }
1239 1239
1240 1240 // The check for count == 0 is done by the assembler code for StrEquals.
1241 1241
1242 1242 if (!stopped()) {
1243 1243 Node* equals = make_string_method_node(Op_StrEquals, receiver_start, receiver_cnt, argument_start, argument_cnt);
1244 1244 phi->init_req(1, equals);
1245 1245 region->init_req(1, control());
1246 1246 }
1247 1247 }
1248 1248
1249 1249 // post merge
1250 1250 set_control(_gvn.transform(region));
1251 1251 record_for_igvn(region);
1252 1252
1253 1253 set_result(_gvn.transform(phi));
1254 1254 return true;
1255 1255 }
1256 1256
1257 1257 //------------------------------inline_array_equals----------------------------
1258 1258 bool LibraryCallKit::inline_array_equals() {
1259 1259 Node* arg1 = argument(0);
1260 1260 Node* arg2 = argument(1);
1261 1261 set_result(_gvn.transform(new (C) AryEqNode(control(), memory(TypeAryPtr::CHARS), arg1, arg2)));
1262 1262 return true;
1263 1263 }
1264 1264
1265 1265 // Java version of String.indexOf(constant string)
1266 1266 // class StringDecl {
1267 1267 // StringDecl(char[] ca) {
1268 1268 // offset = 0;
1269 1269 // count = ca.length;
1270 1270 // value = ca;
1271 1271 // }
1272 1272 // int offset;
1273 1273 // int count;
1274 1274 // char[] value;
1275 1275 // }
1276 1276 //
1277 1277 // static int string_indexOf_J(StringDecl string_object, char[] target_object,
1278 1278 // int targetOffset, int cache_i, int md2) {
1279 1279 // int cache = cache_i;
1280 1280 // int sourceOffset = string_object.offset;
1281 1281 // int sourceCount = string_object.count;
1282 1282 // int targetCount = target_object.length;
1283 1283 //
1284 1284 // int targetCountLess1 = targetCount - 1;
1285 1285 // int sourceEnd = sourceOffset + sourceCount - targetCountLess1;
1286 1286 //
1287 1287 // char[] source = string_object.value;
1288 1288 // char[] target = target_object;
1289 1289 // int lastChar = target[targetCountLess1];
1290 1290 //
1291 1291 // outer_loop:
1292 1292 // for (int i = sourceOffset; i < sourceEnd; ) {
1293 1293 // int src = source[i + targetCountLess1];
1294 1294 // if (src == lastChar) {
1295 1295 // // With random strings and a 4-character alphabet,
1296 1296 // // reverse matching at this point sets up 0.8% fewer
1297 1297 // // frames, but (paradoxically) makes 0.3% more probes.
1298 1298 // // Since those probes are nearer the lastChar probe,
1299 1299 // there may be a net D$ win with reverse matching.
1300 1300 // But reversing the loop inhibits unrolling of the inner loop
1301 1301 // for an unknown reason, as does running the outer loop from
1302 1302 // (sourceOffset - targetCountLess1) to (sourceOffset + sourceCount).
1303 1303 // for (int j = 0; j < targetCountLess1; j++) {
1304 1304 // if (target[targetOffset + j] != source[i+j]) {
1305 1305 // if ((cache & (1 << source[i+j])) == 0) {
1306 1306 // if (md2 < j+1) {
1307 1307 // i += j+1;
1308 1308 // continue outer_loop;
1309 1309 // }
1310 1310 // }
1311 1311 // i += md2;
1312 1312 // continue outer_loop;
1313 1313 // }
1314 1314 // }
1315 1315 // return i - sourceOffset;
1316 1316 // }
1317 1317 // if ((cache & (1 << src)) == 0) {
1318 1318 // i += targetCountLess1;
1319 1319 // } // using "i += targetCount;" and an "else i++;" causes a jump to a jump.
1320 1320 // i++;
1321 1321 // }
1322 1322 // return -1;
1323 1323 // }
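//
// How the precomputed arguments drive the shifts (an illustrative worked
// example, not part of the code): for the pattern "abab",
//   lastChar = 'b' and targetCountLess1 = 3;
//   cache has the bits for 'a' and 'b' set (the chars at indices
//   0..targetCount-2), so a mismatching source char outside {a, b}
//   lets the scan skip ahead past it;
//   md2 = (targetCount - 1) - (last index of 'b' in 0..targetCount-2)
//       = 3 - 1 = 2, the safe shift to apply after a partial match fails.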
1324 1324
1325 1325 //------------------------------string_indexOf------------------------
1326 1326 Node* LibraryCallKit::string_indexOf(Node* string_object, ciTypeArray* target_array, jint targetOffset_i,
1327 1327 jint cache_i, jint md2_i) {
1328 1328
1329 1329 Node* no_ctrl = NULL;
1330 1330 float likely = PROB_LIKELY(0.9);
1331 1331 float unlikely = PROB_UNLIKELY(0.9);
1332 1332
1333 1333 const int nargs = 0; // no arguments to push back for uncommon trap in predicate
1334 1334
1335 1335 Node* source = load_String_value(no_ctrl, string_object);
1336 1336 Node* sourceOffset = load_String_offset(no_ctrl, string_object);
1337 1337 Node* sourceCount = load_String_length(no_ctrl, string_object);
1338 1338
1339 1339 Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array, true)));
1340 1340 jint target_length = target_array->length();
1341 1341 const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin));
1342 1342 const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot);
1343 1343
1344 1344 // String.value field is known to be @Stable.
1345 1345 if (UseImplicitStableValues) {
1346 1346 target = cast_array_to_stable(target, target_type);
1347 1347 }
1348 1348
1349 1349 IdealKit kit(this, false, true);
1350 1350 #define __ kit.
1351 1351 Node* zero = __ ConI(0);
1352 1352 Node* one = __ ConI(1);
1353 1353 Node* cache = __ ConI(cache_i);
1354 1354 Node* md2 = __ ConI(md2_i);
1355 1355 Node* lastChar = __ ConI(target_array->char_at(target_length - 1));
1356 1356 Node* targetCount = __ ConI(target_length);
1357 1357 Node* targetCountLess1 = __ ConI(target_length - 1);
1358 1358 Node* targetOffset = __ ConI(targetOffset_i);
1359 1359 Node* sourceEnd = __ SubI(__ AddI(sourceOffset, sourceCount), targetCountLess1);
1360 1360
1361 1361 IdealVariable rtn(kit), i(kit), j(kit); __ declarations_done();
1362 1362 Node* outer_loop = __ make_label(2 /* goto */);
1363 1363 Node* return_ = __ make_label(1);
1364 1364
1365 1365 __ set(rtn,__ ConI(-1));
1366 1366 __ loop(this, nargs, i, sourceOffset, BoolTest::lt, sourceEnd); {
1367 1367 Node* i2 = __ AddI(__ value(i), targetCountLess1);
1368 1368 // pin to prohibit loading of "next iteration" value which may SEGV (rare)
1369 1369 Node* src = load_array_element(__ ctrl(), source, i2, TypeAryPtr::CHARS);
1370 1370 __ if_then(src, BoolTest::eq, lastChar, unlikely); {
1371 1371 __ loop(this, nargs, j, zero, BoolTest::lt, targetCountLess1); {
1372 1372 Node* tpj = __ AddI(targetOffset, __ value(j));
1373 1373 Node* targ = load_array_element(no_ctrl, target, tpj, target_type);
1374 1374 Node* ipj = __ AddI(__ value(i), __ value(j));
1375 1375 Node* src2 = load_array_element(no_ctrl, source, ipj, TypeAryPtr::CHARS);
1376 1376 __ if_then(targ, BoolTest::ne, src2); {
1377 1377 __ if_then(__ AndI(cache, __ LShiftI(one, src2)), BoolTest::eq, zero); {
1378 1378 __ if_then(md2, BoolTest::lt, __ AddI(__ value(j), one)); {
1379 1379 __ increment(i, __ AddI(__ value(j), one));
1380 1380 __ goto_(outer_loop);
1381 1381 } __ end_if(); __ dead(j);
1382 1382 }__ end_if(); __ dead(j);
1383 1383 __ increment(i, md2);
1384 1384 __ goto_(outer_loop);
1385 1385 }__ end_if();
1386 1386 __ increment(j, one);
1387 1387 }__ end_loop(); __ dead(j);
1388 1388 __ set(rtn, __ SubI(__ value(i), sourceOffset)); __ dead(i);
1389 1389 __ goto_(return_);
1390 1390 }__ end_if();
1391 1391 __ if_then(__ AndI(cache, __ LShiftI(one, src)), BoolTest::eq, zero, likely); {
1392 1392 __ increment(i, targetCountLess1);
1393 1393 }__ end_if();
1394 1394 __ increment(i, one);
1395 1395 __ bind(outer_loop);
1396 1396 }__ end_loop(); __ dead(i);
1397 1397 __ bind(return_);
1398 1398
1399 1399 // Final sync IdealKit and GraphKit.
1400 1400 final_sync(kit);
1401 1401 Node* result = __ value(rtn);
1402 1402 #undef __
1403 1403 C->set_has_loops(true);
1404 1404 return result;
1405 1405 }
1406 1406
1407 1407 //------------------------------inline_string_indexOf------------------------
1408 1408 bool LibraryCallKit::inline_string_indexOf() {
1409 1409 Node* receiver = argument(0);
1410 1410 Node* arg = argument(1);
1411 1411
1412 1412 Node* result;
1413 1413 // Disable the use of pcmpestri until it can be guaranteed that
1414 1414 // the load doesn't cross into the uncommitted space.
1415 1415 if (Matcher::has_match_rule(Op_StrIndexOf) &&
1416 1416 UseSSE42Intrinsics) {
1417 1417 // Generate SSE4.2 version of indexOf
1418 1418 // We currently only have match rules that use SSE4.2
1419 1419
1420 1420 receiver = null_check(receiver);
1421 1421 arg = null_check(arg);
1422 1422 if (stopped()) {
1423 1423 return true;
1424 1424 }
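// The guarded graph built below corresponds roughly to (sketch):
//
//   if (substr.count > source.count) return -1;
//   if (substr.count == 0) return 0;
//   return StrIndexOf(source, substr);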
1425 1425
1426 1426 ciInstanceKlass* str_klass = env()->String_klass();
1427 1427 const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(str_klass);
1428 1428
1429 1429 // Make the merge point
1430 1430 RegionNode* result_rgn = new (C) RegionNode(4);
1431 1431 Node* result_phi = new (C) PhiNode(result_rgn, TypeInt::INT);
1432 1432 Node* no_ctrl = NULL;
1433 1433
1434 1434 // Get start addr of source string
1435 1435 Node* source = load_String_value(no_ctrl, receiver);
1436 1436 Node* source_offset = load_String_offset(no_ctrl, receiver);
1437 1437 Node* source_start = array_element_address(source, source_offset, T_CHAR);
1438 1438
1439 1439 // Get length of source string
1440 1440 Node* source_cnt = load_String_length(no_ctrl, receiver);
1441 1441
1442 1442 // Get start addr of substring
1443 1443 Node* substr = load_String_value(no_ctrl, arg);
1444 1444 Node* substr_offset = load_String_offset(no_ctrl, arg);
1445 1445 Node* substr_start = array_element_address(substr, substr_offset, T_CHAR);
1446 1446
1447 1447 // Get length of substring
1448 1448 Node* substr_cnt = load_String_length(no_ctrl, arg);
1449 1449
1450 1450 // Check for substr count > string count
1451 1451 Node* cmp = _gvn.transform(new(C) CmpINode(substr_cnt, source_cnt));
1452 1452 Node* bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::gt));
1453 1453 Node* if_gt = generate_slow_guard(bol, NULL);
1454 1454 if (if_gt != NULL) {
1455 1455 result_phi->init_req(2, intcon(-1));
1456 1456 result_rgn->init_req(2, if_gt);
1457 1457 }
1458 1458
1459 1459 if (!stopped()) {
1460 1460 // Check for substr count == 0
1461 1461 cmp = _gvn.transform(new(C) CmpINode(substr_cnt, intcon(0)));
1462 1462 bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::eq));
1463 1463 Node* if_zero = generate_slow_guard(bol, NULL);
1464 1464 if (if_zero != NULL) {
1465 1465 result_phi->init_req(3, intcon(0));
1466 1466 result_rgn->init_req(3, if_zero);
1467 1467 }
1468 1468 }
1469 1469
1470 1470 if (!stopped()) {
1471 1471 result = make_string_method_node(Op_StrIndexOf, source_start, source_cnt, substr_start, substr_cnt);
1472 1472 result_phi->init_req(1, result);
1473 1473 result_rgn->init_req(1, control());
1474 1474 }
1475 1475 set_control(_gvn.transform(result_rgn));
1476 1476 record_for_igvn(result_rgn);
1477 1477 result = _gvn.transform(result_phi);
1478 1478
1479 1479 } else { // Use LibraryCallKit::string_indexOf
1480 1480 // don't intrinsify if argument isn't a constant string.
1481 1481 if (!arg->is_Con()) {
1482 1482 return false;
1483 1483 }
1484 1484 const TypeOopPtr* str_type = _gvn.type(arg)->isa_oopptr();
1485 1485 if (str_type == NULL) {
1486 1486 return false;
1487 1487 }
1488 1488 ciInstanceKlass* klass = env()->String_klass();
1489 1489 ciObject* str_const = str_type->const_oop();
1490 1490 if (str_const == NULL || str_const->klass() != klass) {
1491 1491 return false;
1492 1492 }
1493 1493 ciInstance* str = str_const->as_instance();
1494 1494 assert(str != NULL, "must be instance");
1495 1495
1496 1496 ciObject* v = str->field_value_by_offset(java_lang_String::value_offset_in_bytes()).as_object();
1497 1497 ciTypeArray* pat = v->as_type_array(); // pattern (argument) character array
1498 1498
1499 1499 int o;
1500 1500 int c;
1501 1501 if (java_lang_String::has_offset_field()) {
1502 1502 o = str->field_value_by_offset(java_lang_String::offset_offset_in_bytes()).as_int();
1503 1503 c = str->field_value_by_offset(java_lang_String::count_offset_in_bytes()).as_int();
1504 1504 } else {
1505 1505 o = 0;
1506 1506 c = pat->length();
1507 1507 }
1508 1508
1509 1509 // Constant strings have no offset and count == length, which
1510 1510 // simplifies the resulting code somewhat, so let's optimize for that.
1511 1511 if (o != 0 || c != pat->length()) {
1512 1512 return false;
1513 1513 }
1514 1514
1515 1515 receiver = null_check(receiver, T_OBJECT);
1516 1516 // NOTE: No null check on the argument is needed since it's a constant String oop.
1517 1517 if (stopped()) {
1518 1518 return true;
1519 1519 }
1520 1520
1521 1521 // An empty pattern always returns 0 (match at the beginning of the string)
1522 1522 if (c == 0) {
1523 1523 set_result(intcon(0));
1524 1524 return true;
1525 1525 }
1526 1526
1527 1527 // Generate default indexOf
1528 1528 jchar lastChar = pat->char_at(o + (c - 1));
1529 1529 int cache = 0;
1530 1530 int i;
1531 1531 for (i = 0; i < c - 1; i++) {
1532 1532 assert(i < pat->length(), "out of range");
1533 1533 cache |= (1 << (pat->char_at(o + i) & (sizeof(cache) * BitsPerByte - 1)));
1534 1534 }
1535 1535
1536 1536 int md2 = c;
1537 1537 for (i = 0; i < c - 1; i++) {
1538 1538 assert(i < pat->length(), "out of range");
1539 1539 if (pat->char_at(o + i) == lastChar) {
1540 1540 md2 = (c - 1) - i;
1541 1541 }
1542 1542 }
1543 1543
1544 1544 result = string_indexOf(receiver, pat, o, cache, md2);
1545 1545 }
1546 1546 set_result(result);
1547 1547 return true;
1548 1548 }
1549 1549
1550 1550 //--------------------------round_double_node--------------------------------
1551 1551 // Round a double node if necessary.
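// On x87 (UseSSE <= 1) doubles are computed in 80-bit registers, so
// strictfp semantics require an explicit round back to 64-bit precision;
// the RoundDouble node below provides that rounding.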
1552 1552 Node* LibraryCallKit::round_double_node(Node* n) {
1553 1553 if (Matcher::strict_fp_requires_explicit_rounding && UseSSE <= 1)
1554 1554 n = _gvn.transform(new (C) RoundDoubleNode(0, n));
1555 1555 return n;
1556 1556 }
1557 1557
1558 1558 //------------------------------inline_math-----------------------------------
1559 1559 // public static double Math.abs(double)
1560 1560 // public static double Math.sqrt(double)
1561 1561 // public static double Math.log(double)
1562 1562 // public static double Math.log10(double)
1563 1563 bool LibraryCallKit::inline_math(vmIntrinsics::ID id) {
1564 1564 Node* arg = round_double_node(argument(0));
1565 1565 Node* n;
1566 1566 switch (id) {
1567 1567 case vmIntrinsics::_dabs: n = new (C) AbsDNode( arg); break;
1568 1568 case vmIntrinsics::_dsqrt: n = new (C) SqrtDNode(C, control(), arg); break;
1569 1569 case vmIntrinsics::_dlog: n = new (C) LogDNode(C, control(), arg); break;
1570 1570 case vmIntrinsics::_dlog10: n = new (C) Log10DNode(C, control(), arg); break;
1571 1571 default: fatal_unexpected_iid(id); break;
1572 1572 }
1573 1573 set_result(_gvn.transform(n));
1574 1574 return true;
1575 1575 }
1576 1576
1577 1577 //------------------------------inline_trig----------------------------------
1578 1578 // Inline sin/cos/tan instructions, if possible. If rounding is required, do
1579 1579 // argument reduction which will turn into a fast/slow diamond.
1580 1580 bool LibraryCallKit::inline_trig(vmIntrinsics::ID id) {
1581 1581 Node* arg = round_double_node(argument(0));
1582 1582 Node* n = NULL;
1583 1583
1584 1584 switch (id) {
1585 1585 case vmIntrinsics::_dsin: n = new (C) SinDNode(C, control(), arg); break;
1586 1586 case vmIntrinsics::_dcos: n = new (C) CosDNode(C, control(), arg); break;
1587 1587 case vmIntrinsics::_dtan: n = new (C) TanDNode(C, control(), arg); break;
1588 1588 default: fatal_unexpected_iid(id); break;
1589 1589 }
1590 1590 n = _gvn.transform(n);
1591 1591
1592 1592 // Rounding required? Check for argument reduction!
1593 1593 if (Matcher::strict_fp_requires_explicit_rounding) {
1594 1594 static const double pi_4 = 0.7853981633974483;
1595 1595 static const double neg_pi_4 = -0.7853981633974483;
1596 1596 // pi/2 in 80-bit extended precision
1597 1597 // static const unsigned char pi_2_bits_x[] = {0x35,0xc2,0x68,0x21,0xa2,0xda,0x0f,0xc9,0xff,0x3f,0x00,0x00,0x00,0x00,0x00,0x00};
1598 1598 // -pi/2 in 80-bit extended precision
1599 1599 // static const unsigned char neg_pi_2_bits_x[] = {0x35,0xc2,0x68,0x21,0xa2,0xda,0x0f,0xc9,0xff,0xbf,0x00,0x00,0x00,0x00,0x00,0x00};
1600 1600 // Cutoff value for using this argument reduction technique
1601 1601 //static const double pi_2_minus_epsilon = 1.564660403643354;
1602 1602 //static const double neg_pi_2_plus_epsilon = -1.564660403643354;
1603 1603
1604 1604 // Pseudocode for sin:
1605 1605 // if (x <= Math.PI / 4.0) {
1606 1606 // if (x >= -Math.PI / 4.0) return fsin(x);
1607 1607 // if (x >= -Math.PI / 2.0) return -fcos(x + Math.PI / 2.0);
1608 1608 // } else {
1609 1609 // if (x <= Math.PI / 2.0) return fcos(x - Math.PI / 2.0);
1610 1610 // }
1611 1611 // return StrictMath.sin(x);
1612 1612
1613 1613 // Pseudocode for cos:
1614 1614 // if (x <= Math.PI / 4.0) {
1615 1615 // if (x >= -Math.PI / 4.0) return fcos(x);
1616 1616 // if (x >= -Math.PI / 2.0) return fsin(x + Math.PI / 2.0);
1617 1617 // } else {
1618 1618 // if (x <= Math.PI / 2.0) return -fsin(x - Math.PI / 2.0);
1619 1619 // }
1620 1620 // return StrictMath.cos(x);
1621 1621
1622 1622 // Actually, sticking an 80-bit Intel value into C2 will be tough; it
1623 1623 // requires a special machine instruction to load it. Instead we'll try
1624 1624 // the 'easy' case. If we really need the extra range +/- PI/2 we'll
1625 1625 // probably do the math inside the SIN encoding.
1626 1626
1627 1627 // Make the merge point
1628 1628 RegionNode* r = new (C) RegionNode(3);
1629 1629 Node* phi = new (C) PhiNode(r, Type::DOUBLE);
1630 1630
1631 1631 // Flatten arg so we need only 1 test
1632 1632 Node *abs = _gvn.transform(new (C) AbsDNode(arg));
1633 1633 // Node for PI/4 constant
1634 1634 Node *pi4 = makecon(TypeD::make(pi_4));
1635 1635 // Check PI/4 : abs(arg)
1636 1636 Node *cmp = _gvn.transform(new (C) CmpDNode(pi4,abs));
1637 1637 // Check: If PI/4 < abs(arg) then go slow
1638 1638 Node *bol = _gvn.transform(new (C) BoolNode( cmp, BoolTest::lt ));
1639 1639 // Branch either way
1640 1640 IfNode *iff = create_and_xform_if(control(),bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
1641 1641 set_control(opt_iff(r,iff));
1642 1642
1643 1643 // Set fast path result
1644 1644 phi->init_req(2, n);
1645 1645
1646 1646 // Slow path - non-blocking leaf call
1647 1647 Node* call = NULL;
1648 1648 switch (id) {
1649 1649 case vmIntrinsics::_dsin:
1650 1650 call = make_runtime_call(RC_LEAF, OptoRuntime::Math_D_D_Type(),
1651 1651 CAST_FROM_FN_PTR(address, SharedRuntime::dsin),
1652 1652 "Sin", NULL, arg, top());
1653 1653 break;
1654 1654 case vmIntrinsics::_dcos:
1655 1655 call = make_runtime_call(RC_LEAF, OptoRuntime::Math_D_D_Type(),
1656 1656 CAST_FROM_FN_PTR(address, SharedRuntime::dcos),
1657 1657 "Cos", NULL, arg, top());
1658 1658 break;
1659 1659 case vmIntrinsics::_dtan:
1660 1660 call = make_runtime_call(RC_LEAF, OptoRuntime::Math_D_D_Type(),
1661 1661 CAST_FROM_FN_PTR(address, SharedRuntime::dtan),
1662 1662 "Tan", NULL, arg, top());
1663 1663 break;
1664 1664 }
1665 1665 assert(control()->in(0) == call, "");
1666 1666 Node* slow_result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
1667 1667 r->init_req(1, control());
1668 1668 phi->init_req(1, slow_result);
1669 1669
1670 1670 // Post-merge
1671 1671 set_control(_gvn.transform(r));
1672 1672 record_for_igvn(r);
1673 1673 n = _gvn.transform(phi);
1674 1674
1675 1675 C->set_has_split_ifs(true); // Has chance for split-if optimization
1676 1676 }
1677 1677 set_result(n);
1678 1678 return true;
1679 1679 }
1680 1680
1681 1681 void LibraryCallKit::finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName) {
1682 1682 //-------------------
1683 1683 // result = result.isNaN() ? funcAddr() : result;
1684 1684 // Check for NaN by testing result != result; if it is NaN, either trap
1685 1685 // or go to the runtime
1686 1686 Node* cmpisnan = _gvn.transform(new (C) CmpDNode(result, result));
1687 1687 // Build the boolean node
1688 1688 Node* bolisnum = _gvn.transform(new (C) BoolNode(cmpisnan, BoolTest::eq));
1689 1689
1690 1690 if (!too_many_traps(Deoptimization::Reason_intrinsic)) {
1691 1691 { BuildCutout unless(this, bolisnum, PROB_STATIC_FREQUENT);
1692 1692 // The pow or exp intrinsic returned a NaN, which requires a call
1693 1693 // to the runtime. Recompile with the runtime call.
1694 1694 uncommon_trap(Deoptimization::Reason_intrinsic,
1695 1695 Deoptimization::Action_make_not_entrant);
1696 1696 }
1697 1697 set_result(result);
1698 1698 } else {
1699 1699 // If this inlining ever returned NaN in the past, we compile a call
1700 1700 // to the runtime to properly handle corner cases
1701 1701
1702 1702 IfNode* iff = create_and_xform_if(control(), bolisnum, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
1703 1703 Node* if_slow = _gvn.transform(new (C) IfFalseNode(iff));
1704 1704 Node* if_fast = _gvn.transform(new (C) IfTrueNode(iff));
1705 1705
1706 1706 if (!if_slow->is_top()) {
1707 1707 RegionNode* result_region = new (C) RegionNode(3);
1708 1708 PhiNode* result_val = new (C) PhiNode(result_region, Type::DOUBLE);
1709 1709
1710 1710 result_region->init_req(1, if_fast);
1711 1711 result_val->init_req(1, result);
1712 1712
1713 1713 set_control(if_slow);
1714 1714
1715 1715 const TypePtr* no_memory_effects = NULL;
1716 1716 Node* rt = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName,
1717 1717 no_memory_effects,
1718 1718 x, top(), y, y ? top() : NULL);
1719 1719 Node* value = _gvn.transform(new (C) ProjNode(rt, TypeFunc::Parms+0));
1720 1720 #ifdef ASSERT
1721 1721 Node* value_top = _gvn.transform(new (C) ProjNode(rt, TypeFunc::Parms+1));
1722 1722 assert(value_top == top(), "second value must be top");
1723 1723 #endif
1724 1724
1725 1725 result_region->init_req(2, control());
1726 1726 result_val->init_req(2, value);
1727 1727 set_result(result_region, result_val);
1728 1728 } else {
1729 1729 set_result(result);
1730 1730 }
1731 1731 }
1732 1732 }
1733 1733
1734 1734 //------------------------------inline_exp-------------------------------------
1735 1735 // Inline exp instructions, if possible. The Intel hardware only misses
1736 1736 // really odd corner cases (+/- Infinity). Just uncommon-trap them.
1737 1737 bool LibraryCallKit::inline_exp() {
1738 1738 Node* arg = round_double_node(argument(0));
1739 1739 Node* n = _gvn.transform(new (C) ExpDNode(C, control(), arg));
1740 1740
1741 1741 finish_pow_exp(n, arg, NULL, OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP");
1742 1742
1743 1743 C->set_has_split_ifs(true); // Has chance for split-if optimization
1744 1744 return true;
1745 1745 }
1746 1746
1747 1747 //------------------------------inline_pow-------------------------------------
1748 1748 // Inline power instructions, if possible.
1749 1749 bool LibraryCallKit::inline_pow() {
1750 1750 // Pseudocode for pow
1751 1751 // if (x <= 0.0) {
1752 1752 // long longy = (long)y;
1753 1753 // if ((double)longy == y) { // if y is long
1754 1754 // if (y + 1 == y) longy = 0; // huge number: even
1755 1755 // result = ((1&longy) == 0) ? DPow(abs(x), y) : -DPow(abs(x), y);
1756 1756 // } else {
1757 1757 // result = NaN;
1758 1758 // }
1759 1759 // } else {
1760 1760 // result = DPow(x,y);
1761 1761 // }
1762 1762 // if (result != result) { // i.e. result is NaN
1763 1763 // result = uncommon_trap() or runtime_call();
1764 1764 // }
1765 1765 // return result;
1766 1766
1767 1767 Node* x = round_double_node(argument(0));
1768 1768 Node* y = round_double_node(argument(2));
1769 1769
1770 1770 Node* result = NULL;
1771 1771
1772 1772 if (!too_many_traps(Deoptimization::Reason_intrinsic)) {
1773 1773 // Short form: skip the fancy tests and just check for NaN result.
1774 1774 result = _gvn.transform(new (C) PowDNode(C, control(), x, y));
1775 1775 } else {
1776 1776 // If this inlining ever returned NaN in the past, include all
1777 1777 // checks + call to the runtime.
1778 1778
1779 1779 // Set the merge point for If node with condition of (x <= 0.0)
1780 1780 // There are four possible paths to region node and phi node
1781 1781 RegionNode *r = new (C) RegionNode(4);
1782 1782 Node *phi = new (C) PhiNode(r, Type::DOUBLE);
1783 1783
1784 1784 // Build the first if node: if (x <= 0.0)
1785 1785 // Node for 0 constant
1786 1786 Node *zeronode = makecon(TypeD::ZERO);
1787 1787 // Check x:0
1788 1788 Node *cmp = _gvn.transform(new (C) CmpDNode(x, zeronode));
1789 1789 // Check: If (x<=0) then go complex path
1790 1790 Node *bol1 = _gvn.transform(new (C) BoolNode( cmp, BoolTest::le ));
1791 1791 // Branch either way
1792 1792 IfNode *if1 = create_and_xform_if(control(),bol1, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
1793 1793 // Fast path taken; set region slot 3
1794 1794 Node *fast_taken = _gvn.transform(new (C) IfFalseNode(if1));
1795 1795 r->init_req(3,fast_taken); // Capture fast-control
1796 1796
1797 1797 // Fast path not-taken, i.e. slow path
1798 1798 Node *complex_path = _gvn.transform(new (C) IfTrueNode(if1));
1799 1799
1800 1800 // Set fast path result
1801 1801 Node *fast_result = _gvn.transform(new (C) PowDNode(C, control(), x, y));
1802 1802 phi->init_req(3, fast_result);
1803 1803
1804 1804 // Complex path
1805 1805 // Build the second if node (if y is long)
1806 1806 // Node for (long)y
1807 1807 Node *longy = _gvn.transform(new (C) ConvD2LNode(y));
1808 1808 // Node for (double)((long) y)
1809 1809 Node *doublelongy= _gvn.transform(new (C) ConvL2DNode(longy));
1810 1810 // Check (double)((long) y) : y
1811 1811 Node *cmplongy= _gvn.transform(new (C) CmpDNode(doublelongy, y));
1812 1812 // Check if (y isn't long) then go to slow path
1813 1813
1814 1814 Node *bol2 = _gvn.transform(new (C) BoolNode( cmplongy, BoolTest::ne ));
1815 1815 // Branch either way
1816 1816 IfNode *if2 = create_and_xform_if(complex_path,bol2, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
1817 1817 Node* ylong_path = _gvn.transform(new (C) IfFalseNode(if2));
1818 1818
1819 1819 Node *slow_path = _gvn.transform(new (C) IfTrueNode(if2));
1820 1820
1821 1821 // Calculate DPow(abs(x), y)*(1 & (long)y)
1822 1822 // Node for constant 1
1823 1823 Node *conone = longcon(1);
1824 1824 // 1& (long)y
1825 1825 Node *signnode= _gvn.transform(new (C) AndLNode(conone, longy));
1826 1826
1827 1827 // A huge number is always even. Detect a huge number by checking
1828 1828 // if y + 1 == y and set integer to be tested for parity to 0.
1829 1829 // Required for corner case:
1830 1830 // (long)9.223372036854776E18 = max_jlong
1831 1831 // (double)(long)9.223372036854776E18 = 9.223372036854776E18
1832 1832 // max_jlong is odd but 9.223372036854776E18 is even
1833 1833 Node* yplus1 = _gvn.transform(new (C) AddDNode(y, makecon(TypeD::make(1))));
1834 1834 Node *cmpyplus1= _gvn.transform(new (C) CmpDNode(yplus1, y));
1835 1835 Node *bolyplus1 = _gvn.transform(new (C) BoolNode( cmpyplus1, BoolTest::eq ));
1836 1836 Node* correctedsign = NULL;
1837 1837 if (ConditionalMoveLimit != 0) {
1838 1838 correctedsign = _gvn.transform( CMoveNode::make(C, NULL, bolyplus1, signnode, longcon(0), TypeLong::LONG));
1839 1839 } else {
1840 1840 IfNode *ifyplus1 = create_and_xform_if(ylong_path,bolyplus1, PROB_FAIR, COUNT_UNKNOWN);
1841 1841 RegionNode *r = new (C) RegionNode(3);
1842 1842 Node *phi = new (C) PhiNode(r, TypeLong::LONG);
1843 1843 r->init_req(1, _gvn.transform(new (C) IfFalseNode(ifyplus1)));
1844 1844 r->init_req(2, _gvn.transform(new (C) IfTrueNode(ifyplus1)));
1845 1845 phi->init_req(1, signnode);
1846 1846 phi->init_req(2, longcon(0));
1847 1847 correctedsign = _gvn.transform(phi);
1848 1848 ylong_path = _gvn.transform(r);
1849 1849 record_for_igvn(r);
1850 1850 }
1851 1851
1852 1852 // zero node
1853 1853 Node *conzero = longcon(0);
1854 1854 // Check (1&(long)y)==0?
1855 1855 Node *cmpeq1 = _gvn.transform(new (C) CmpLNode(correctedsign, conzero));
1856 1856 // Check if (1&(long)y)!=0?, if so the result is negative
1857 1857 Node *bol3 = _gvn.transform(new (C) BoolNode( cmpeq1, BoolTest::ne ));
1858 1858 // abs(x)
1859 1859 Node *absx=_gvn.transform(new (C) AbsDNode(x));
1860 1860 // abs(x)^y
1861 1861 Node *absxpowy = _gvn.transform(new (C) PowDNode(C, control(), absx, y));
1862 1862 // -abs(x)^y
1863 1863 Node *negabsxpowy = _gvn.transform(new (C) NegDNode (absxpowy));
1864 1864 // (1&(long)y)==1?-DPow(abs(x), y):DPow(abs(x), y)
1865 1865 Node *signresult = NULL;
1866 1866 if (ConditionalMoveLimit != 0) {
1867 1867 signresult = _gvn.transform( CMoveNode::make(C, NULL, bol3, absxpowy, negabsxpowy, Type::DOUBLE));
1868 1868 } else {
1869 1869 IfNode *ifyeven = create_and_xform_if(ylong_path,bol3, PROB_FAIR, COUNT_UNKNOWN);
1870 1870 RegionNode *r = new (C) RegionNode(3);
1871 1871 Node *phi = new (C) PhiNode(r, Type::DOUBLE);
1872 1872 r->init_req(1, _gvn.transform(new (C) IfFalseNode(ifyeven)));
1873 1873 r->init_req(2, _gvn.transform(new (C) IfTrueNode(ifyeven)));
1874 1874 phi->init_req(1, absxpowy);
1875 1875 phi->init_req(2, negabsxpowy);
1876 1876 signresult = _gvn.transform(phi);
1877 1877 ylong_path = _gvn.transform(r);
1878 1878 record_for_igvn(r);
1879 1879 }
1880 1880 // Set complex path fast result
1881 1881 r->init_req(2, ylong_path);
1882 1882 phi->init_req(2, signresult);
1883 1883
1884 1884 static const jlong nan_bits = CONST64(0x7ff8000000000000);
1885 1885 Node *slow_result = makecon(TypeD::make(*(double*)&nan_bits)); // return NaN
1886 1886 r->init_req(1,slow_path);
1887 1887 phi->init_req(1,slow_result);
1888 1888
1889 1889 // Post merge
1890 1890 set_control(_gvn.transform(r));
1891 1891 record_for_igvn(r);
1892 1892 result = _gvn.transform(phi);
1893 1893 }
1894 1894
1895 1895 finish_pow_exp(result, x, y, OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW");
1896 1896
1897 1897 C->set_has_split_ifs(true); // Has chance for split-if optimization
1898 1898 return true;
1899 1899 }
1900 1900
1901 1901 //------------------------------runtime_math-----------------------------
1902 1902 bool LibraryCallKit::runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName) {
1903 1903 assert(call_type == OptoRuntime::Math_DD_D_Type() || call_type == OptoRuntime::Math_D_D_Type(),
1904 1904 "must be (DD)D or (D)D type");
1905 1905
1906 1906 // Inputs
1907 1907 Node* a = round_double_node(argument(0));
1908 1908 Node* b = (call_type == OptoRuntime::Math_DD_D_Type()) ? round_double_node(argument(2)) : NULL;
1909 1909
1910 1910 const TypePtr* no_memory_effects = NULL;
1911 1911 Node* trig = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName,
1912 1912 no_memory_effects,
1913 1913 a, top(), b, b ? top() : NULL);
1914 1914 Node* value = _gvn.transform(new (C) ProjNode(trig, TypeFunc::Parms+0));
1915 1915 #ifdef ASSERT
1916 1916 Node* value_top = _gvn.transform(new (C) ProjNode(trig, TypeFunc::Parms+1));
1917 1917 assert(value_top == top(), "second value must be top");
1918 1918 #endif
1919 1919
1920 1920 set_result(value);
1921 1921 return true;
1922 1922 }
1923 1923
1924 1924 //------------------------------inline_math_native-----------------------------
1925 1925 bool LibraryCallKit::inline_math_native(vmIntrinsics::ID id) {
1926 1926 #define FN_PTR(f) CAST_FROM_FN_PTR(address, f)
1927 1927 switch (id) {
1928 1928 // These intrinsics are not properly supported on all hardware
1929 1929 case vmIntrinsics::_dcos: return Matcher::has_match_rule(Op_CosD) ? inline_trig(id) :
1930 1930 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dcos), "COS");
1931 1931 case vmIntrinsics::_dsin: return Matcher::has_match_rule(Op_SinD) ? inline_trig(id) :
1932 1932 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dsin), "SIN");
1933 1933 case vmIntrinsics::_dtan: return Matcher::has_match_rule(Op_TanD) ? inline_trig(id) :
1934 1934 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dtan), "TAN");
1935 1935
1936 1936 case vmIntrinsics::_dlog: return Matcher::has_match_rule(Op_LogD) ? inline_math(id) :
1937 1937 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog), "LOG");
1938 1938 case vmIntrinsics::_dlog10: return Matcher::has_match_rule(Op_Log10D) ? inline_math(id) :
1939 1939 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog10), "LOG10");
1940 1940
1941 1941 // These intrinsics are supported on all hardware
1942 1942 case vmIntrinsics::_dsqrt: return Matcher::match_rule_supported(Op_SqrtD) ? inline_math(id) : false;
1943 1943 case vmIntrinsics::_dabs: return Matcher::has_match_rule(Op_AbsD) ? inline_math(id) : false;
1944 1944
1945 1945 case vmIntrinsics::_dexp: return Matcher::has_match_rule(Op_ExpD) ? inline_exp() :
1946 1946 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dexp), "EXP");
1947 1947 case vmIntrinsics::_dpow: return Matcher::has_match_rule(Op_PowD) ? inline_pow() :
1948 1948 runtime_math(OptoRuntime::Math_DD_D_Type(), FN_PTR(SharedRuntime::dpow), "POW");
1949 1949 #undef FN_PTR
1950 1950
1951 1951 // These intrinsics are not yet correctly implemented
1952 1952 case vmIntrinsics::_datan2:
1953 1953 return false;
1954 1954
1955 1955 default:
1956 1956 fatal_unexpected_iid(id);
1957 1957 return false;
1958 1958 }
1959 1959 }
1960 1960
1961 1961 static bool is_simple_name(Node* n) {
1962 1962 return (n->req() == 1 // constant
1963 1963 || (n->is_Type() && n->as_Type()->type()->singleton())
1964 1964 || n->is_Proj() // parameter or return value
1965 1965 || n->is_Phi() // local of some sort
1966 1966 );
1967 1967 }
1968 1968
1969 1969 //----------------------------inline_min_max-----------------------------------
1970 1970 bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) {
1971 1971 set_result(generate_min_max(id, argument(0), argument(1)));
1972 1972 return true;
1973 1973 }
1974 1974
1975 1975 void LibraryCallKit::inline_math_mathExact(Node* math, Node *test) {
1976 1976 Node* bol = _gvn.transform( new (C) BoolNode(test, BoolTest::overflow) );
1977 1977 IfNode* check = create_and_map_if(control(), bol, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
1978 1978 Node* fast_path = _gvn.transform( new (C) IfFalseNode(check));
1979 1979 Node* slow_path = _gvn.transform( new (C) IfTrueNode(check) );
1980 1980
1981 1981 {
1982 1982 PreserveJVMState pjvms(this);
1983 1983 PreserveReexecuteState preexecs(this);
1984 1984 jvms()->set_should_reexecute(true);
1985 1985
1986 1986 set_control(slow_path);
1987 1987 set_i_o(i_o());
1988 1988
1989 1989 uncommon_trap(Deoptimization::Reason_intrinsic,
1990 1990 Deoptimization::Action_none);
1991 1991 }
1992 1992
1993 1993 set_control(fast_path);
1994 1994 set_result(math);
1995 1995 }
1996 1996
1997 1997 template <typename OverflowOp>
1998 1998 bool LibraryCallKit::inline_math_overflow(Node* arg1, Node* arg2) {
1999 1999 typedef typename OverflowOp::MathOp MathOp;
2000 2000
2001 2001 MathOp* mathOp = new(C) MathOp(arg1, arg2);
2002 2002 Node* operation = _gvn.transform( mathOp );
2003 2003 Node* ofcheck = _gvn.transform( new(C) OverflowOp(arg1, arg2) );
2004 2004 inline_math_mathExact(operation, ofcheck);
2005 2005 return true;
2006 2006 }
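// The Java-level contract these exact-math intrinsics implement, shown
// for addExact (a sketch of the JDK library code, for orientation only):
//
//   public static int addExact(int x, int y) {
//     int r = x + y;
//     // overflow iff both operands have the opposite sign of the result
//     if (((x ^ r) & (y ^ r)) < 0)
//       throw new ArithmeticException("integer overflow");
//     return r;
//   }
//
// The intrinsic keeps only the math op on the fast path; the overflow
// branch becomes an uncommon trap that re-executes the bytecode in the
// interpreter, which then throws.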
2007 2007
2008 2008 bool LibraryCallKit::inline_math_addExactI(bool is_increment) {
2009 2009 return inline_math_overflow<OverflowAddINode>(argument(0), is_increment ? intcon(1) : argument(1));
2010 2010 }
2011 2011
2012 2012 bool LibraryCallKit::inline_math_addExactL(bool is_increment) {
2013 2013 return inline_math_overflow<OverflowAddLNode>(argument(0), is_increment ? longcon(1) : argument(2));
2014 2014 }
2015 2015
2016 2016 bool LibraryCallKit::inline_math_subtractExactI(bool is_decrement) {
2017 2017 return inline_math_overflow<OverflowSubINode>(argument(0), is_decrement ? intcon(1) : argument(1));
2018 2018 }
2019 2019
2020 2020 bool LibraryCallKit::inline_math_subtractExactL(bool is_decrement) {
2021 2021 return inline_math_overflow<OverflowSubLNode>(argument(0), is_decrement ? longcon(1) : argument(2));
2022 2022 }
2023 2023
2024 2024 bool LibraryCallKit::inline_math_negateExactI() {
2025 2025 return inline_math_overflow<OverflowSubINode>(intcon(0), argument(0));
2026 2026 }
2027 2027
2028 2028 bool LibraryCallKit::inline_math_negateExactL() {
2029 2029 return inline_math_overflow<OverflowSubLNode>(longcon(0), argument(0));
2030 2030 }
2031 2031
2032 2032 bool LibraryCallKit::inline_math_multiplyExactI() {
2033 2033 return inline_math_overflow<OverflowMulINode>(argument(0), argument(1));
2034 2034 }
2035 2035
2036 2036 bool LibraryCallKit::inline_math_multiplyExactL() {
2037 2037 return inline_math_overflow<OverflowMulLNode>(argument(0), argument(2));
2038 2038 }
2039 2039
2040 2040 Node*
2041 2041 LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) {
2042 2042 // These are the candidate return values:
2043 2043 Node* xvalue = x0;
2044 2044 Node* yvalue = y0;
2045 2045
2046 2046 if (xvalue == yvalue) {
2047 2047 return xvalue;
2048 2048 }
2049 2049
2050 2050 bool want_max = (id == vmIntrinsics::_max);
2051 2051
2052 2052 const TypeInt* txvalue = _gvn.type(xvalue)->isa_int();
2053 2053 const TypeInt* tyvalue = _gvn.type(yvalue)->isa_int();
2054 2054 if (txvalue == NULL || tyvalue == NULL) return top();
2055 2055 // This is not really necessary, but it is consistent with a
2056 2056 // hypothetical MaxINode::Value method:
2057 2057 int widen = MAX2(txvalue->_widen, tyvalue->_widen);
2058 2058
2059 2059 // %%% This folding logic should (ideally) be in a different place.
2060 2060 // Some should be inside IfNode, and there should be a more reliable
2061 2061 // transformation of ?: style patterns into cmoves. We also want
2062 2062 // more powerful optimizations around cmove and min/max.
2063 2063
2064 2064 // Try to find a dominating comparison of these guys.
2065 2065 // It can simplify the index computation for Arrays.copyOf
2066 2066 // and similar uses of System.arraycopy.
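// For instance (an illustrative sketch): in code shaped like
//   if (newLength <= original.length)
//     n = Math.min(newLength, original.length);
// the dominating 'le' test already proves the answer is newLength,
// so the min folds to that input with no CMove at all.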
2067 2067 // First, compute the normalized version of CmpI(x, y).
2068 2068 int cmp_op = Op_CmpI;
2069 2069 Node* xkey = xvalue;
2070 2070 Node* ykey = yvalue;
2071 2071 Node* ideal_cmpxy = _gvn.transform(new(C) CmpINode(xkey, ykey));
2072 2072 if (ideal_cmpxy->is_Cmp()) {
2073 2073 // E.g., if we have CmpI(length - offset, count),
2074 2074 // it might idealize to CmpI(length, count + offset)
2075 2075 cmp_op = ideal_cmpxy->Opcode();
2076 2076 xkey = ideal_cmpxy->in(1);
2077 2077 ykey = ideal_cmpxy->in(2);
2078 2078 }
2079 2079
2080 2080 // Start by locating any relevant comparisons.
2081 2081 Node* start_from = (xkey->outcnt() < ykey->outcnt()) ? xkey : ykey;
2082 2082 Node* cmpxy = NULL;
2083 2083 Node* cmpyx = NULL;
2084 2084 for (DUIterator_Fast kmax, k = start_from->fast_outs(kmax); k < kmax; k++) {
2085 2085 Node* cmp = start_from->fast_out(k);
2086 2086 if (cmp->outcnt() > 0 && // must have prior uses
2087 2087 cmp->in(0) == NULL && // must be context-independent
2088 2088 cmp->Opcode() == cmp_op) { // right kind of compare
2089 2089 if (cmp->in(1) == xkey && cmp->in(2) == ykey) cmpxy = cmp;
2090 2090 if (cmp->in(1) == ykey && cmp->in(2) == xkey) cmpyx = cmp;
2091 2091 }
2092 2092 }
2093 2093
2094 2094 const int NCMPS = 2;
2095 2095 Node* cmps[NCMPS] = { cmpxy, cmpyx };
2096 2096 int cmpn;
2097 2097 for (cmpn = 0; cmpn < NCMPS; cmpn++) {
2098 2098 if (cmps[cmpn] != NULL) break; // find a result
2099 2099 }
2100 2100 if (cmpn < NCMPS) {
2101 2101 // Look for a dominating test that tells us the min and max.
2102 2102 int depth = 0; // Limit search depth for speed
2103 2103 Node* dom = control();
2104 2104 for (; dom != NULL; dom = IfNode::up_one_dom(dom, true)) {
2105 2105 if (++depth >= 100) break;
2106 2106 Node* ifproj = dom;
2107 2107 if (!ifproj->is_Proj()) continue;
2108 2108 Node* iff = ifproj->in(0);
2109 2109 if (!iff->is_If()) continue;
2110 2110 Node* bol = iff->in(1);
2111 2111 if (!bol->is_Bool()) continue;
2112 2112 Node* cmp = bol->in(1);
2113 2113 if (cmp == NULL) continue;
2114 2114 for (cmpn = 0; cmpn < NCMPS; cmpn++)
2115 2115 if (cmps[cmpn] == cmp) break;
2116 2116 if (cmpn == NCMPS) continue;
2117 2117 BoolTest::mask btest = bol->as_Bool()->_test._test;
2118 2118 if (ifproj->is_IfFalse()) btest = BoolTest(btest).negate();
2119 2119 if (cmp->in(1) == ykey) btest = BoolTest(btest).commute();
2120 2120 // At this point, we know that 'x btest y' is true.
2121 2121 switch (btest) {
2122 2122 case BoolTest::eq:
2123 2123 // They are proven equal, so we can collapse the min/max.
2124 2124 // Either value is the answer. Choose the simpler.
2125 2125 if (is_simple_name(yvalue) && !is_simple_name(xvalue))
2126 2126 return yvalue;
2127 2127 return xvalue;
2128 2128 case BoolTest::lt: // x < y
2129 2129 case BoolTest::le: // x <= y
2130 2130 return (want_max ? yvalue : xvalue);
2131 2131 case BoolTest::gt: // x > y
2132 2132 case BoolTest::ge: // x >= y
2133 2133 return (want_max ? xvalue : yvalue);
2134 2134 }
2135 2135 }
2136 2136 }
2137 2137
2138 2138 // We failed to find a dominating test.
2139 2139 // Let's pick a test that might GVN with prior tests.
2140 2140 Node* best_bol = NULL;
2141 2141 BoolTest::mask best_btest = BoolTest::illegal;
2142 2142 for (cmpn = 0; cmpn < NCMPS; cmpn++) {
2143 2143 Node* cmp = cmps[cmpn];
2144 2144 if (cmp == NULL) continue;
2145 2145 for (DUIterator_Fast jmax, j = cmp->fast_outs(jmax); j < jmax; j++) {
2146 2146 Node* bol = cmp->fast_out(j);
2147 2147 if (!bol->is_Bool()) continue;
2148 2148 BoolTest::mask btest = bol->as_Bool()->_test._test;
2149 2149 if (btest == BoolTest::eq || btest == BoolTest::ne) continue;
2150 2150 if (cmp->in(1) == ykey) btest = BoolTest(btest).commute();
2151 2151 if (bol->outcnt() > (best_bol == NULL ? 0 : best_bol->outcnt())) {
2152 2152 best_bol = bol->as_Bool();
2153 2153 best_btest = btest;
2154 2154 }
2155 2155 }
2156 2156 }
2157 2157
2158 2158 Node* answer_if_true = NULL;
2159 2159 Node* answer_if_false = NULL;
2160 2160 switch (best_btest) {
2161 2161 default:
2162 2162 if (cmpxy == NULL)
2163 2163 cmpxy = ideal_cmpxy;
2164 2164 best_bol = _gvn.transform(new(C) BoolNode(cmpxy, BoolTest::lt));
2165 2165 // and fall through:
2166 2166 case BoolTest::lt: // x < y
2167 2167 case BoolTest::le: // x <= y
2168 2168 answer_if_true = (want_max ? yvalue : xvalue);
2169 2169 answer_if_false = (want_max ? xvalue : yvalue);
2170 2170 break;
2171 2171 case BoolTest::gt: // x > y
2172 2172 case BoolTest::ge: // x >= y
2173 2173 answer_if_true = (want_max ? xvalue : yvalue);
2174 2174 answer_if_false = (want_max ? yvalue : xvalue);
2175 2175 break;
2176 2176 }
2177 2177
2178 2178 jint hi, lo;
2179 2179 if (want_max) {
2180 2180 // We can sharpen the minimum.
2181 2181 hi = MAX2(txvalue->_hi, tyvalue->_hi);
2182 2182 lo = MAX2(txvalue->_lo, tyvalue->_lo);
2183 2183 } else {
2184 2184 // We can sharpen the maximum.
2185 2185 hi = MIN2(txvalue->_hi, tyvalue->_hi);
2186 2186 lo = MIN2(txvalue->_lo, tyvalue->_lo);
2187 2187 }
2188 2188
2189 2189 // Use a flow-free graph structure, to avoid creating excess control edges
2190 2190 // which could hinder other optimizations.
2191 2191 // Since Math.min/max is often used with arraycopy, we want
2192 2192 // tightly_coupled_allocation to be able to see beyond min/max expressions.
2193 2193 Node* cmov = CMoveNode::make(C, NULL, best_bol,
2194 2194 answer_if_false, answer_if_true,
2195 2195 TypeInt::make(lo, hi, widen));
2196 2196
2197 2197 return _gvn.transform(cmov);
2198 2198
2199 2199 /*
2200 2200 // This is not as desirable as it may seem, since Min and Max
2201 2201 // nodes do not have a full set of optimizations.
2202 2202 // And they would interfere, anyway, with 'if' optimizations
2203 2203 // and with CMoveI canonical forms.
2204 2204 switch (id) {
2205 2205 case vmIntrinsics::_min:
2206 2206 result_val = _gvn.transform(new (C, 3) MinINode(x,y)); break;
2207 2207 case vmIntrinsics::_max:
2208 2208 result_val = _gvn.transform(new (C, 3) MaxINode(x,y)); break;
2209 2209 default:
2210 2210 ShouldNotReachHere();
2211 2211 }
2212 2212 */
2213 2213 }
2214 2214
2215 2215 inline int
2216 2216 LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset) {
2217 2217 const TypePtr* base_type = TypePtr::NULL_PTR;
2218 2218 if (base != NULL) base_type = _gvn.type(base)->isa_ptr();
2219 2219 if (base_type == NULL) {
2220 2220 // Unknown type.
2221 2221 return Type::AnyPtr;
2222 2222 } else if (base_type == TypePtr::NULL_PTR) {
2223 2223 // Since this is a NULL+long form, we have to switch to a rawptr.
2224 2224 base = _gvn.transform(new (C) CastX2PNode(offset));
2225 2225 offset = MakeConX(0);
2226 2226 return Type::RawPtr;
2227 2227 } else if (base_type->base() == Type::RawPtr) {
2228 2228 return Type::RawPtr;
2229 2229 } else if (base_type->isa_oopptr()) {
2230 2230 // Base is never null => always a heap address.
2231 2231 if (base_type->ptr() == TypePtr::NotNull) {
2232 2232 return Type::OopPtr;
2233 2233 }
2234 2234 // Offset is small => always a heap address.
2235 2235 const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t();
2236 2236 if (offset_type != NULL &&
2237 2237 base_type->offset() == 0 && // (should always be?)
2238 2238 offset_type->_lo >= 0 &&
2239 2239 !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) {
2240 2240 return Type::OopPtr;
2241 2241 }
2242 2242 // Otherwise, it might either be oop+off or NULL+addr.
2243 2243 return Type::AnyPtr;
2244 2244 } else {
2245 2245 // No information:
2246 2246 return Type::AnyPtr;
2247 2247 }
2248 2248 }
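// Example of the NULL+long form classified above (illustrative):
//   unsafe.getInt(null, rawAddress)
// arrives with base == NULL and the whole address in 'offset'; the code
// casts the offset to a raw pointer and resets the offset to zero.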
2249 2249
2250 2250 inline Node* LibraryCallKit::make_unsafe_address(Node* base, Node* offset) {
2251 2251 int kind = classify_unsafe_addr(base, offset);
2252 2252 if (kind == Type::RawPtr) {
2253 2253 return basic_plus_adr(top(), base, offset);
2254 2254 } else {
2255 2255 return basic_plus_adr(base, offset);
2256 2256 }
2257 2257 }
2258 2258
2259 2259 //--------------------------inline_number_methods-----------------------------
2260 2260 // inline int Integer.numberOfLeadingZeros(int)
2261 2261 // inline int Long.numberOfLeadingZeros(long)
2262 2262 //
2263 2263 // inline int Integer.numberOfTrailingZeros(int)
2264 2264 // inline int Long.numberOfTrailingZeros(long)
2265 2265 //
2266 2266 // inline int Integer.bitCount(int)
2267 2267 // inline int Long.bitCount(long)
2268 2268 //
2269 2269 // inline char Character.reverseBytes(char)
2270 2270 // inline short Short.reverseBytes(short)
2271 2271 // inline int Integer.reverseBytes(int)
2272 2272 // inline long Long.reverseBytes(long)
2273 2273 bool LibraryCallKit::inline_number_methods(vmIntrinsics::ID id) {
2274 2274 Node* arg = argument(0);
2275 2275 Node* n;
2276 2276 switch (id) {
2277 2277 case vmIntrinsics::_numberOfLeadingZeros_i: n = new (C) CountLeadingZerosINode( arg); break;
2278 2278 case vmIntrinsics::_numberOfLeadingZeros_l: n = new (C) CountLeadingZerosLNode( arg); break;
2279 2279 case vmIntrinsics::_numberOfTrailingZeros_i: n = new (C) CountTrailingZerosINode(arg); break;
2280 2280 case vmIntrinsics::_numberOfTrailingZeros_l: n = new (C) CountTrailingZerosLNode(arg); break;
2281 2281 case vmIntrinsics::_bitCount_i: n = new (C) PopCountINode( arg); break;
2282 2282 case vmIntrinsics::_bitCount_l: n = new (C) PopCountLNode( arg); break;
2283 2283 case vmIntrinsics::_reverseBytes_c: n = new (C) ReverseBytesUSNode(0, arg); break;
2284 2284 case vmIntrinsics::_reverseBytes_s: n = new (C) ReverseBytesSNode( 0, arg); break;
2285 2285 case vmIntrinsics::_reverseBytes_i: n = new (C) ReverseBytesINode( 0, arg); break;
2286 2286 case vmIntrinsics::_reverseBytes_l: n = new (C) ReverseBytesLNode( 0, arg); break;
2287 2287 default: fatal_unexpected_iid(id); break;
2288 2288 }
2289 2289 set_result(_gvn.transform(n));
2290 2290 return true;
2291 2291 }
2292 2292
2293 2293 //----------------------------inline_unsafe_access----------------------------
2294 2294
2295 2295 const static BasicType T_ADDRESS_HOLDER = T_LONG;
2296 2296
2297 2297 // Helper that guards and inserts a pre-barrier.
2298 2298 void LibraryCallKit::insert_pre_barrier(Node* base_oop, Node* offset,
2299 2299 Node* pre_val, bool need_mem_bar) {
2300 2300 // We could be accessing the referent field of a reference object. If so, when G1
2301 2301 // is enabled, we need to log the value in the referent field in an SATB buffer.
2302 2302 // This routine performs some compile time filters and generates suitable
2303 2303 // runtime filters that guard the pre-barrier code.
2304 2304 // Also add a memory barrier for a non-volatile load from the referent field
2305 2305 // to prevent commoning of loads across safepoints.
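// A typical access that must pass these filters (illustrative):
//   Object referent = unsafe.getObject(ref, referentOffset);
// i.e. a Reference.get()-style read of java.lang.ref.Reference.referent.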
2306 2306 if (!UseG1GC && !need_mem_bar)
2307 2307 return;
2308 2308
2309 2309 // Some compile time checks.
2310 2310
2311 2311 // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
2312 2312 const TypeX* otype = offset->find_intptr_t_type();
2313 2313 if (otype != NULL && otype->is_con() &&
2314 2314 otype->get_con() != java_lang_ref_Reference::referent_offset) {
2315 2315 // Constant offset but not the reference_offset so just return
2316 2316 return;
2317 2317 }
2318 2318
2319 2319 // We only need to generate the runtime guards for instances.
2320 2320 const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
2321 2321 if (btype != NULL) {
2322 2322 if (btype->isa_aryptr()) {
2323 2323 // Array type so nothing to do
2324 2324 return;
2325 2325 }
2326 2326
2327 2327 const TypeInstPtr* itype = btype->isa_instptr();
2328 2328 if (itype != NULL) {
2329 2329 // Can the klass of base_oop be statically determined to be
2330 2330 // _not_ a sub-class of Reference and _not_ Object?
2331 2331 ciKlass* klass = itype->klass();
2332 2332 if ( klass->is_loaded() &&
2333 2333 !klass->is_subtype_of(env()->Reference_klass()) &&
2334 2334 !env()->Object_klass()->is_subtype_of(klass)) {
2335 2335 return;
2336 2336 }
2337 2337 }
2338 2338 }
2339 2339
2340 2340 // The compile time filters did not reject base_oop/offset so
2341 2341 // we need to generate the following runtime filters
2342 2342 //
2343 2343 // if (offset == java_lang_ref_Reference::_reference_offset) {
2344 2344 // if (instance_of(base, java.lang.ref.Reference)) {
2345 2345 // pre_barrier(_, pre_val, ...);
2346 2346 // }
2347 2347 // }
2348 2348
2349 2349 float likely = PROB_LIKELY( 0.999);
2350 2350 float unlikely = PROB_UNLIKELY(0.999);
2351 2351
2352 2352 IdealKit ideal(this);
2353 2353 #define __ ideal.
2354 2354
2355 2355 Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);
2356 2356
2357 2357 __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
2358 2358 // Update graphKit memory and control from IdealKit.
2359 2359 sync_kit(ideal);
2360 2360
2361 2361 Node* ref_klass_con = makecon(TypeKlassPtr::make(env()->Reference_klass()));
2362 2362 Node* is_instof = gen_instanceof(base_oop, ref_klass_con);
2363 2363
2364 2364 // Update IdealKit memory and control from graphKit.
2365 2365 __ sync_kit(this);
2366 2366
2367 2367 Node* one = __ ConI(1);
2368 2368 // is_instof == 0 if base_oop == NULL
2369 2369 __ if_then(is_instof, BoolTest::eq, one, unlikely); {
2370 2370
2371 2371 // Update graphKit from IdealKit.
2372 2372 sync_kit(ideal);
2373 2373
2374 2374 // Use the pre-barrier to record the value in the referent field
2375 2375 pre_barrier(false /* do_load */,
2376 2376 __ ctrl(),
2377 2377 NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
2378 2378 pre_val /* pre_val */,
2379 2379 T_OBJECT);
2380 2380 if (need_mem_bar) {
2381 2381 // Add memory barrier to prevent commoning reads from this field
2382 2382 // across safepoint since GC can change its value.
2383 2383 insert_mem_bar(Op_MemBarCPUOrder);
2384 2384 }
2385 2385 // Update IdealKit from graphKit.
2386 2386 __ sync_kit(this);
2387 2387
2388 2388 } __ end_if(); // _ref_type != ref_none
2389 2389 } __ end_if(); // offset == referent_offset
2390 2390
2391 2391 // Final sync IdealKit and GraphKit.
2392 2392 final_sync(ideal);
2393 2393 #undef __
2394 2394 }
2395 2395
2396 2396
2397 2397 // Interpret Unsafe.fieldOffset cookies correctly:
2398 2398 extern jlong Unsafe_field_offset_to_byte_offset(jlong field_offset);
2399 2399
2400 2400 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type, bool is_native_ptr) {
2401 2401 // Attempt to infer a sharper value type from the offset and base type.
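// For instance (illustrative): getObject(fooInstance, offsetOf(Foo.bar)),
// where field bar is declared of type Bar, lets the loaded oop be typed
// Bar rather than plain Object, helping downstream type-based optimizations.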
2402 2402 ciKlass* sharpened_klass = NULL;
2403 2403
2404 2404 // See if it is an instance field, with an object type.
2405 2405 if (alias_type->field() != NULL) {
2406 2406 assert(!is_native_ptr, "native pointer op cannot use a java address");
2407 2407 if (alias_type->field()->type()->is_klass()) {
2408 2408 sharpened_klass = alias_type->field()->type()->as_klass();
2409 2409 }
2410 2410 }
2411 2411
2412 2412 // See if it is a narrow oop array.
2413 2413 if (adr_type->isa_aryptr()) {
2414 2414 if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2415 2415 const TypeOopPtr *elem_type = adr_type->is_aryptr()->elem()->isa_oopptr();
2416 2416 if (elem_type != NULL) {
2417 2417 sharpened_klass = elem_type->klass();
2418 2418 }
2419 2419 }
2420 2420 }
2421 2421
2422 2422 // The sharpened class might be unloaded if there is no class loader
2423 2423 // constraint in place.
2424 2424 if (sharpened_klass != NULL && sharpened_klass->is_loaded()) {
2425 2425 const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
2426 2426
2427 2427 #ifndef PRODUCT
2428 2428 if (C->print_intrinsics() || C->print_inlining()) {
2429 2429 tty->print(" from base type: "); adr_type->dump();
2430 2430 tty->print(" sharpened value: "); tjp->dump();
2431 2431 }
2432 2432 #endif
2433 2433 // Sharpen the value type.
2434 2434 return tjp;
2435 2435 }
2436 2436 return NULL;
2437 2437 }
2438 2438
2439 2439 bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile) {
2440 2440 if (callee()->is_static()) return false; // caller must have the capability!
2441 2441
2442 2442 #ifndef PRODUCT
2443 2443 {
2444 2444 ResourceMark rm;
2445 2445 // Check the signatures.
2446 2446 ciSignature* sig = callee()->signature();
2447 2447 #ifdef ASSERT
2448 2448 if (!is_store) {
2449 2449 // Object getObject(Object base, int/long offset), etc.
2450 2450 BasicType rtype = sig->return_type()->basic_type();
2451 2451 if (rtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::getAddress_name())
2452 2452 rtype = T_ADDRESS; // it is really a C void*
2453 2453 assert(rtype == type, "getter must return the expected value");
2454 2454 if (!is_native_ptr) {
2455 2455 assert(sig->count() == 2, "oop getter has 2 arguments");
2456 2456 assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2457 2457 assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2458 2458 } else {
2459 2459 assert(sig->count() == 1, "native getter has 1 argument");
2460 2460 assert(sig->type_at(0)->basic_type() == T_LONG, "getter base is long");
2461 2461 }
2462 2462 } else {
2463 2463 // void putObject(Object base, int/long offset, Object x), etc.
2464 2464 assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2465 2465 if (!is_native_ptr) {
2466 2466 assert(sig->count() == 3, "oop putter has 3 arguments");
2467 2467 assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2468 2468 assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2469 2469 } else {
2470 2470 assert(sig->count() == 2, "native putter has 2 arguments");
2471 2471 assert(sig->type_at(0)->basic_type() == T_LONG, "putter base is long");
2472 2472 }
2473 2473 BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2474 2474 if (vtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::putAddress_name())
2475 2475 vtype = T_ADDRESS; // it is really a C void*
2476 2476 assert(vtype == type, "putter must accept the expected value");
2477 2477 }
2478 2478 #endif // ASSERT
2479 2479 }
2480 2480 #endif //PRODUCT
2481 2481
2482 2482 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2483 2483
2484 2484 Node* receiver = argument(0); // type: oop
2485 2485
2486 2486 // Build address expression. See the code in inline_unsafe_prefetch.
2487 2487 Node* adr;
2488 2488 Node* heap_base_oop = top();
2489 2489 Node* offset = top();
2490 2490 Node* val;
2491 2491
2492 2492 if (!is_native_ptr) {
2493 2493 // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2494 2494 Node* base = argument(1); // type: oop
2495 2495 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2496 2496 offset = argument(2); // type: long
2497 2497 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2498 2498 // to be plain byte offsets, which are also the same as those accepted
2499 2499 // by oopDesc::field_base.
2500 2500 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2501 2501 "fieldOffset must be byte-scaled");
2502 2502 // 32-bit machines ignore the high half!
2503 2503 offset = ConvL2X(offset);
2504 2504 adr = make_unsafe_address(base, offset);
2505 2505 heap_base_oop = base;
2506 2506 val = is_store ? argument(4) : NULL;
2507 2507 } else {
2508 2508 Node* ptr = argument(1); // type: long
2509 2509 ptr = ConvL2X(ptr); // adjust Java long to machine word
2510 2510 adr = make_unsafe_address(NULL, ptr);
2511 2511 val = is_store ? argument(3) : NULL;
2512 2512 }
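// The two shapes handled above correspond to the two flavors of
// sun.misc.Unsafe accessors (signatures shown for illustration):
//
//   int getInt(Object base, long offset); // heap: base oop + byte offset
//   int getInt(long address);             // native: raw machine pointer
//
// Either way, make_unsafe_address reduces the operands to a single
// machine-word address.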
2513 2513
2514 2514 const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2515 2515
2516 2516 // First guess at the value type.
2517 2517 const Type *value_type = Type::get_const_basic_type(type);
2518 2518
2519 2519 // Try to categorize the address. If it comes up as TypeJavaPtr::BOTTOM,
2520 2520 // there was not enough information to nail it down.
2521 2521 Compile::AliasType* alias_type = C->alias_type(adr_type);
2522 2522 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2523 2523
2524 2524 // We will need memory barriers unless we can determine a unique
2525 2525 // alias category for this reference. (Note: If for some reason
2526 2526 // the barriers get omitted and the unsafe reference begins to "pollute"
2527 2527 // the alias analysis of the rest of the graph, either Compile::can_alias
2528 2528 // or Compile::must_alias will throw a diagnostic assert.)
2529 2529 bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
2530 2530
2531 2531 // If we are reading the value of the referent field of a Reference
2532 2532 // object (either by using Unsafe directly or through reflection)
2533 2533 // then, if G1 is enabled, we need to record the referent in an
2534 2534 // SATB log buffer using the pre-barrier mechanism.
2535 2535 // Also we need to add memory barrier to prevent commoning reads
2536 2536 // from this field across safepoint since GC can change its value.
2537 2537 bool need_read_barrier = !is_native_ptr && !is_store &&
2538 2538 offset != top() && heap_base_oop != top();
2539 2539
2540 2540 if (!is_store && type == T_OBJECT) {
2541 2541 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type, is_native_ptr);
2542 2542 if (tjp != NULL) {
2543 2543 value_type = tjp;
2544 2544 }
2545 2545 }
2546 2546
2547 2547 receiver = null_check(receiver);
2548 2548 if (stopped()) {
2549 2549 return true;
2550 2550 }
2551 2551 // Heap pointers get a null-check from the interpreter,
2552 2552 // as a courtesy. However, this is not guaranteed by Unsafe,
2553 2553 // and it is not possible to fully distinguish unintended nulls
2554 2554 // from intended ones in this API.
2555 2555
2556 2556 if (is_volatile) {
2557 2557 // We need to emit leading and trailing CPU membars (see below) in
2558 2558 // addition to memory membars when is_volatile. This is a little
2559 2559 // too strong, but avoids the need to insert per-alias-type
2560 2560 // volatile membars (for stores; compare Parse::do_put_xxx), which
2561 2561 // we cannot do effectively here because we probably only have a
2562 2562 // rough approximation of type.
2563 2563 need_mem_bar = true;
2564 2564 // For Stores, place a memory ordering barrier now.
2565 2565 if (is_store) {
2566 2566 insert_mem_bar(Op_MemBarRelease);
2567 2567 } else {
2568 2568 if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2569 2569 insert_mem_bar(Op_MemBarVolatile);
2570 2570 }
2571 2571 }
2572 2572 }
2573 2573
2574 2574 // Memory barrier to prevent normal and 'unsafe' accesses from
2575 2575 // bypassing each other. Happens after null checks, so the
2576 2576 // exception paths do not take memory state from the memory barrier,
2577 2577 // so there's no problems making a strong assert about mixing users
2578 2578 // of safe & unsafe memory. Otherwise fails in a CTW of rt.jar
2579 2579 // around 5701, class sun/reflect/UnsafeBooleanFieldAccessorImpl.
2580 2580 if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2581 2581
2582 2582 if (!is_store) {
2583 2583 Node* p = make_load(control(), adr, value_type, type, adr_type, MemNode::unordered, is_volatile);
2584 2584 // load value
2585 2585 switch (type) {
2586 2586 case T_BOOLEAN:
2587 2587 case T_CHAR:
2588 2588 case T_BYTE:
2589 2589 case T_SHORT:
2590 2590 case T_INT:
2591 2591 case T_LONG:
2592 2592 case T_FLOAT:
2593 2593 case T_DOUBLE:
2594 2594 break;
2595 2595 case T_OBJECT:
2596 2596 if (need_read_barrier) {
2597 2597 insert_pre_barrier(heap_base_oop, offset, p, !(is_volatile || need_mem_bar));
2598 2598 }
2599 2599 break;
2600 2600 case T_ADDRESS:
2601 2601 // Cast to an int type.
2602 2602 p = _gvn.transform(new (C) CastP2XNode(NULL, p));
2603 - p = ConvX2L(p);
2603 + p = ConvX2UL(p);
2604 2604 break;
2605 2605 default:
2606 2606 fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
2607 2607 break;
2608 2608 }
2609 2609 // The load node has the control of the preceding MemBarCPUOrder. All
2610 2610 // following nodes will have the control of the MemBarCPUOrder inserted at
2611 2611 // the end of this method. So, pushing the load onto the stack at a later
2612 2612 // point is fine.
2613 2613 set_result(p);
2614 2614 } else {
2615 2615 // place effect of store into memory
2616 2616 switch (type) {
2617 2617 case T_DOUBLE:
2618 2618 val = dstore_rounding(val);
2619 2619 break;
2620 2620 case T_ADDRESS:
2621 2621 // Repackage the long as a pointer.
2622 2622 val = ConvL2X(val);
2623 2623 val = _gvn.transform(new (C) CastX2PNode(val));
2624 2624 break;
2625 2625 }
2626 2626
2627 2627 MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
2628 2628 if (type != T_OBJECT ) {
2629 2629 (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile);
2630 2630 } else {
2631 2631 // Possibly an oop being stored to Java heap or native memory
2632 2632 if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
2633 2633 // oop to Java heap.
2634 2634 (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
2635 2635 } else {
2636 2636 // We can't tell at compile time if we are storing in the Java heap or outside
2637 2637 // of it. So we need to emit code to conditionally do the proper type of
2638 2638 // store.
2639 2639
2640 2640 IdealKit ideal(this);
2641 2641 #define __ ideal.
2642 2642 // QQQ who knows what probability is here??
2643 2643 __ if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
2644 2644 // Sync IdealKit and graphKit.
2645 2645 sync_kit(ideal);
2646 2646 Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
2647 2647 // Update IdealKit memory.
2648 2648 __ sync_kit(this);
2649 2649 } __ else_(); {
2650 2650 __ store(__ ctrl(), adr, val, type, alias_type->index(), mo, is_volatile);
2651 2651 } __ end_if();
2652 2652 // Final sync IdealKit and GraphKit.
2653 2653 final_sync(ideal);
2654 2654 #undef __
2655 2655 }
2656 2656 }
2657 2657 }
2658 2658
2659 2659 if (is_volatile) {
2660 2660 if (!is_store) {
2661 2661 insert_mem_bar(Op_MemBarAcquire);
2662 2662 } else {
2663 2663 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2664 2664 insert_mem_bar(Op_MemBarVolatile);
2665 2665 }
2666 2666 }
2667 2667 }
2668 2668
2669 2669 if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2670 2670
2671 2671 return true;
2672 2672 }
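// Rough summary of the membar pattern the code above emits for a
// volatile Unsafe access (IRIW = support_IRIW_for_not_multiple_copy_atomic_cpu):
//
//   load:  [MemBarVolatile if IRIW]  MemBarCPUOrder  Load
//          MemBarAcquire  MemBarCPUOrder
//   store: MemBarRelease  MemBarCPUOrder  Store
//          [MemBarVolatile if !IRIW]  MemBarCPUOrder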
2673 2673
2674 2674 //----------------------------inline_unsafe_prefetch----------------------------
2675 2675
2676 2676 bool LibraryCallKit::inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static) {
2677 2677 #ifndef PRODUCT
2678 2678 {
2679 2679 ResourceMark rm;
2680 2680 // Check the signatures.
2681 2681 ciSignature* sig = callee()->signature();
2682 2682 #ifdef ASSERT
2683 2683 // Object getObject(Object base, int/long offset), etc.
2684 2684 BasicType rtype = sig->return_type()->basic_type();
2685 2685 if (!is_native_ptr) {
2686 2686 assert(sig->count() == 2, "oop prefetch has 2 arguments");
2687 2687 assert(sig->type_at(0)->basic_type() == T_OBJECT, "prefetch base is object");
2688 2688 assert(sig->type_at(1)->basic_type() == T_LONG, "prefetch offset is correct");
2689 2689 } else {
2690 2690 assert(sig->count() == 1, "native prefetch has 1 argument");
2691 2691 assert(sig->type_at(0)->basic_type() == T_LONG, "prefetch base is long");
2692 2692 }
2693 2693 #endif // ASSERT
2694 2694 }
2695 2695 #endif // !PRODUCT
2696 2696
2697 2697 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2698 2698
2699 2699 const int idx = is_static ? 0 : 1;
2700 2700 if (!is_static) {
2701 2701 null_check_receiver();
2702 2702 if (stopped()) {
2703 2703 return true;
2704 2704 }
2705 2705 }
2706 2706
2707 2707 // Build address expression. See the code in inline_unsafe_access.
2708 2708 Node *adr;
2709 2709 if (!is_native_ptr) {
2710 2710 // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2711 2711 Node* base = argument(idx + 0); // type: oop
2712 2712 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2713 2713 Node* offset = argument(idx + 1); // type: long
2714 2714 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2715 2715 // to be plain byte offsets, which are also the same as those accepted
2716 2716 // by oopDesc::field_base.
2717 2717 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2718 2718 "fieldOffset must be byte-scaled");
2719 2719 // 32-bit machines ignore the high half!
2720 2720 offset = ConvL2X(offset);
2721 2721 adr = make_unsafe_address(base, offset);
2722 2722 } else {
2723 2723 Node* ptr = argument(idx + 0); // type: long
2724 2724 ptr = ConvL2X(ptr); // adjust Java long to machine word
2725 2725 adr = make_unsafe_address(NULL, ptr);
2726 2726 }
2727 2727
2728 2728 // Generate the read or write prefetch
2729 2729 Node *prefetch;
2730 2730 if (is_store) {
2731 2731 prefetch = new (C) PrefetchWriteNode(i_o(), adr);
2732 2732 } else {
2733 2733 prefetch = new (C) PrefetchReadNode(i_o(), adr);
2734 2734 }
2735 2735 prefetch->init_req(0, control());
2736 2736 set_i_o(_gvn.transform(prefetch));
2737 2737
2738 2738 return true;
2739 2739 }
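// Illustrative call shapes for the prefetch intrinsics (the *Static
// variants simply lack a receiver, hence the idx adjustment above):
//
//   unsafe.prefetchRead(obj, offset);  // becomes a PrefetchReadNode
//   unsafe.prefetchWrite(obj, offset); // becomes a PrefetchWriteNode
//
// The prefetch node is threaded onto the i_o chain, so it is ordered
// with respect to control but has no memory effect of its own.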
2740 2740
2741 2741 //----------------------------inline_unsafe_load_store----------------------------
2742 2742 // This method serves a couple of different customers (depending on LoadStoreKind):
2743 2743 //
2744 2744 // LS_cmpxchg:
2745 2745 // public final native boolean compareAndSwapObject(Object o, long offset, Object expected, Object x);
2746 2746 // public final native boolean compareAndSwapInt( Object o, long offset, int expected, int x);
2747 2747 // public final native boolean compareAndSwapLong( Object o, long offset, long expected, long x);
2748 2748 //
2749 2749 // LS_xadd:
2750 2750 // public int getAndAddInt( Object o, long offset, int delta)
2751 2751 // public long getAndAddLong(Object o, long offset, long delta)
2752 2752 //
2753 2753 // LS_xchg:
2754 2754 // int getAndSet(Object o, long offset, int newValue)
2755 2755 // long getAndSet(Object o, long offset, long newValue)
2756 2756 // Object getAndSet(Object o, long offset, Object newValue)
2757 2757 //
2758 2758 bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind) {
2759 2759 // This basic scheme here is the same as inline_unsafe_access, but
2760 2760 // differs in enough details that combining them would make the code
2761 2761 // overly confusing. (This is a true fact! I originally combined
2762 2762 // them, but even I was confused by it!) As much code/comments as
2763 2763 // possible are retained from inline_unsafe_access though to make
2764 2764 // the correspondences clearer. - dl
2765 2765
2766 2766 if (callee()->is_static()) return false; // caller must have the capability!
2767 2767
2768 2768 #ifndef PRODUCT
2769 2769 BasicType rtype;
2770 2770 {
2771 2771 ResourceMark rm;
2772 2772 // Check the signatures.
2773 2773 ciSignature* sig = callee()->signature();
2774 2774 rtype = sig->return_type()->basic_type();
2775 2775 if (kind == LS_xadd || kind == LS_xchg) {
2776 2776 // Check the signatures.
2777 2777 #ifdef ASSERT
2778 2778 assert(rtype == type, "get and set must return the expected type");
2779 2779 assert(sig->count() == 3, "get and set has 3 arguments");
2780 2780 assert(sig->type_at(0)->basic_type() == T_OBJECT, "get and set base is object");
2781 2781 assert(sig->type_at(1)->basic_type() == T_LONG, "get and set offset is long");
2782 2782 assert(sig->type_at(2)->basic_type() == type, "get and set must take expected type as new value/delta");
2783 2783 #endif // ASSERT
2784 2784 } else if (kind == LS_cmpxchg) {
2785 2785 // Check the signatures.
2786 2786 #ifdef ASSERT
2787 2787 assert(rtype == T_BOOLEAN, "CAS must return boolean");
2788 2788 assert(sig->count() == 4, "CAS has 4 arguments");
2789 2789 assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
2790 2790 assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
2791 2791 #endif // ASSERT
2792 2792 } else {
2793 2793 ShouldNotReachHere();
2794 2794 }
2795 2795 }
2796 2796 #endif //PRODUCT
2797 2797
2798 2798 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
2799 2799
2800 2800 // Get arguments:
2801 2801 Node* receiver = NULL;
2802 2802 Node* base = NULL;
2803 2803 Node* offset = NULL;
2804 2804 Node* oldval = NULL;
2805 2805 Node* newval = NULL;
2806 2806 if (kind == LS_cmpxchg) {
2807 2807 const bool two_slot_type = type2size[type] == 2;
2808 2808 receiver = argument(0); // type: oop
2809 2809 base = argument(1); // type: oop
2810 2810 offset = argument(2); // type: long
2811 2811 oldval = argument(4); // type: oop, int, or long
2812 2812 newval = argument(two_slot_type ? 6 : 5); // type: oop, int, or long
2813 2813 } else if (kind == LS_xadd || kind == LS_xchg){
2814 2814 receiver = argument(0); // type: oop
2815 2815 base = argument(1); // type: oop
2816 2816 offset = argument(2); // type: long
2817 2817 oldval = NULL;
2818 2818 newval = argument(4); // type: oop, int, or long
2819 2819 }
2820 2820
2821 2821 // Null check receiver.
2822 2822 receiver = null_check(receiver);
2823 2823 if (stopped()) {
2824 2824 return true;
2825 2825 }
2826 2826
2827 2827 // Build field offset expression.
2828 2828 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2829 2829 // to be plain byte offsets, which are also the same as those accepted
2830 2830 // by oopDesc::field_base.
2831 2831 assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
2832 2832 // 32-bit machines ignore the high half of long offsets
2833 2833 offset = ConvL2X(offset);
2834 2834 Node* adr = make_unsafe_address(base, offset);
2835 2835 const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2836 2836
2837 2837 // For CAS, unlike inline_unsafe_access, there seems no point in
2838 2838 // trying to refine types. Just use the coarse types here.
2839 2839 const Type *value_type = Type::get_const_basic_type(type);
2840 2840 Compile::AliasType* alias_type = C->alias_type(adr_type);
2841 2841 assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2842 2842
2843 2843 if (kind == LS_xchg && type == T_OBJECT) {
2844 2844 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2845 2845 if (tjp != NULL) {
2846 2846 value_type = tjp;
2847 2847 }
2848 2848 }
2849 2849
2850 2850 int alias_idx = C->get_alias_index(adr_type);
2851 2851
2852 2852 // Memory-model-wise, a LoadStore acts like a little synchronized
2853 2853 // block, so it needs barriers on each side. These don't translate
2854 2854 // into actual barriers on most machines, but we still need the rest
2855 2855 // of the compiler to respect ordering.
2856 2856
2857 2857 insert_mem_bar(Op_MemBarRelease);
2858 2858 insert_mem_bar(Op_MemBarCPUOrder);
2859 2859
2860 2860 // 4984716: MemBars must be inserted before this
2861 2861 // memory node in order to avoid a false
2862 2862 // dependency which will confuse the scheduler.
2863 2863 Node *mem = memory(alias_idx);
2864 2864
2865 2865 // For now, we handle only those cases that actually exist: ints,
2866 2866 // longs, and Object. Adding others should be straightforward.
2867 2867 Node* load_store;
2868 2868 switch(type) {
2869 2869 case T_INT:
2870 2870 if (kind == LS_xadd) {
2871 2871 load_store = _gvn.transform(new (C) GetAndAddINode(control(), mem, adr, newval, adr_type));
2872 2872 } else if (kind == LS_xchg) {
2873 2873 load_store = _gvn.transform(new (C) GetAndSetINode(control(), mem, adr, newval, adr_type));
2874 2874 } else if (kind == LS_cmpxchg) {
2875 2875 load_store = _gvn.transform(new (C) CompareAndSwapINode(control(), mem, adr, newval, oldval));
2876 2876 } else {
2877 2877 ShouldNotReachHere();
2878 2878 }
2879 2879 break;
2880 2880 case T_LONG:
2881 2881 if (kind == LS_xadd) {
2882 2882 load_store = _gvn.transform(new (C) GetAndAddLNode(control(), mem, adr, newval, adr_type));
2883 2883 } else if (kind == LS_xchg) {
2884 2884 load_store = _gvn.transform(new (C) GetAndSetLNode(control(), mem, adr, newval, adr_type));
2885 2885 } else if (kind == LS_cmpxchg) {
2886 2886 load_store = _gvn.transform(new (C) CompareAndSwapLNode(control(), mem, adr, newval, oldval));
2887 2887 } else {
2888 2888 ShouldNotReachHere();
2889 2889 }
2890 2890 break;
2891 2891 case T_OBJECT:
2892 2892 // Transformation of a value which could be a NULL pointer (CastPP #NULL)
2893 2893 // could be delayed during Parse (for example, in adjust_map_after_if()).
2894 2894 // Execute the transformation here to avoid barrier generation in such a case.
2895 2895 if (_gvn.type(newval) == TypePtr::NULL_PTR)
2896 2896 newval = _gvn.makecon(TypePtr::NULL_PTR);
2897 2897
2898 2898 // Reference stores need a store barrier.
2899 2899 if (kind == LS_xchg) {
2900 2900 // If the pre-barrier must execute before the oop store, the old value will require a do_load here.
2901 2901 if (!can_move_pre_barrier()) {
2902 2902 pre_barrier(true /* do_load*/,
2903 2903 control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
2904 2904 NULL /* pre_val*/,
2905 2905 T_OBJECT);
2906 2906 } // Else move pre_barrier to use load_store value, see below.
2907 2907 } else if (kind == LS_cmpxchg) {
2908 2908 // Same as for newval above:
2909 2909 if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
2910 2910 oldval = _gvn.makecon(TypePtr::NULL_PTR);
2911 2911 }
2912 2912 // The only known value which might get overwritten is oldval.
2913 2913 pre_barrier(false /* do_load */,
2914 2914 control(), NULL, NULL, max_juint, NULL, NULL,
2915 2915 oldval /* pre_val */,
2916 2916 T_OBJECT);
2917 2917 } else {
2918 2918 ShouldNotReachHere();
2919 2919 }
2920 2920
2921 2921 #ifdef _LP64
2922 2922 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2923 2923 Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
2924 2924 if (kind == LS_xchg) {
2925 2925 load_store = _gvn.transform(new (C) GetAndSetNNode(control(), mem, adr,
2926 2926 newval_enc, adr_type, value_type->make_narrowoop()));
2927 2927 } else {
2928 2928 assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2929 2929 Node *oldval_enc = _gvn.transform(new (C) EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
2930 2930 load_store = _gvn.transform(new (C) CompareAndSwapNNode(control(), mem, adr,
2931 2931 newval_enc, oldval_enc));
2932 2932 }
2933 2933 } else
2934 2934 #endif
2935 2935 {
2936 2936 if (kind == LS_xchg) {
2937 2937 load_store = _gvn.transform(new (C) GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
2938 2938 } else {
2939 2939 assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2940 2940 load_store = _gvn.transform(new (C) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
2941 2941 }
2942 2942 }
2943 2943 post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
2944 2944 break;
2945 2945 default:
2946 2946 fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
2947 2947 break;
2948 2948 }
2949 2949
2950 2950 // SCMemProjNodes represent the memory state of a LoadStore. Their
2951 2951 // main role is to prevent LoadStore nodes from being optimized away
2952 2952 // when their results aren't used.
2953 2953 Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
2954 2954 set_memory(proj, alias_idx);
2955 2955
2956 2956 if (type == T_OBJECT && kind == LS_xchg) {
2957 2957 #ifdef _LP64
2958 2958 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2959 2959 load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
2960 2960 }
2961 2961 #endif
2962 2962 if (can_move_pre_barrier()) {
2963 2963 // Don't need to load pre_val. The old value is returned by load_store.
2964 2964 // The pre_barrier can execute after the xchg as long as no safepoint
2965 2965 // gets inserted between them.
2966 2966 pre_barrier(false /* do_load */,
2967 2967 control(), NULL, NULL, max_juint, NULL, NULL,
2968 2968 load_store /* pre_val */,
2969 2969 T_OBJECT);
2970 2970 }
2971 2971 }
2972 2972
2973 2973 // Add the trailing membar surrounding the access
2974 2974 insert_mem_bar(Op_MemBarCPUOrder);
2975 2975 insert_mem_bar(Op_MemBarAcquire);
2976 2976
2977 2977 assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
2978 2978 set_result(load_store);
2979 2979 return true;
2980 2980 }
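// Rough shape of the graph emitted above for, e.g.,
//
//   unsafe.compareAndSwapInt(o, offset, expected, x);
//
//   MemBarRelease  MemBarCPUOrder
//   CompareAndSwapINode (+ SCMemProj pinning its memory state)
//   MemBarCPUOrder  MemBarAcquire
//
// i.e. the LoadStore is fenced like a tiny synchronized block, matching
// the memory-model comment earlier in this function.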
2981 2981
2982 2982 //----------------------------inline_unsafe_ordered_store----------------------
2983 2983 // public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
2984 2984 // public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x);
2985 2985 // public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x);
2986 2986 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
2987 2987 // This is another variant of inline_unsafe_access, differing in
2988 2988 // that it always issues a store-store ("release") barrier and ensures
2989 2989 // store-atomicity (which only matters for "long").
2990 2990
2991 2991 if (callee()->is_static()) return false; // caller must have the capability!
2992 2992
2993 2993 #ifndef PRODUCT
2994 2994 {
2995 2995 ResourceMark rm;
2996 2996 // Check the signatures.
2997 2997 ciSignature* sig = callee()->signature();
2998 2998 #ifdef ASSERT
2999 2999 BasicType rtype = sig->return_type()->basic_type();
3000 3000 assert(rtype == T_VOID, "must return void");
3001 3001 assert(sig->count() == 3, "has 3 arguments");
3002 3002 assert(sig->type_at(0)->basic_type() == T_OBJECT, "base is object");
3003 3003 assert(sig->type_at(1)->basic_type() == T_LONG, "offset is long");
3004 3004 #endif // ASSERT
3005 3005 }
3006 3006 #endif //PRODUCT
3007 3007
3008 3008 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
3009 3009
3010 3010 // Get arguments:
3011 3011 Node* receiver = argument(0); // type: oop
3012 3012 Node* base = argument(1); // type: oop
3013 3013 Node* offset = argument(2); // type: long
3014 3014 Node* val = argument(4); // type: oop, int, or long
3015 3015
3016 3016 // Null check receiver.
3017 3017 receiver = null_check(receiver);
3018 3018 if (stopped()) {
3019 3019 return true;
3020 3020 }
3021 3021
3022 3022 // Build field offset expression.
3023 3023 assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
3024 3024 // 32-bit machines ignore the high half of long offsets
3025 3025 offset = ConvL2X(offset);
3026 3026 Node* adr = make_unsafe_address(base, offset);
3027 3027 const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
3028 3028 const Type *value_type = Type::get_const_basic_type(type);
3029 3029 Compile::AliasType* alias_type = C->alias_type(adr_type);
3030 3030
3031 3031 insert_mem_bar(Op_MemBarRelease);
3032 3032 insert_mem_bar(Op_MemBarCPUOrder);
3033 3033 // Ensure that the store is atomic for longs:
3034 3034 const bool require_atomic_access = true;
3035 3035 Node* store;
3036 3036 if (type == T_OBJECT) { // reference stores need a store barrier.
3037 3037 store = store_oop_to_unknown(control(), base, adr, adr_type, val, type, MemNode::release);
3038 3038 } else {
3039 3039 store = store_to_memory(control(), adr, val, type, adr_type, MemNode::release, require_atomic_access);
3040 3040 }
3041 3041 insert_mem_bar(Op_MemBarCPUOrder);
3042 3042 return true;
3043 3043 }
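// Illustrative caller of this intrinsic (in this JDK vintage):
// AtomicInteger.lazySet, which delegates to unsafe.putOrderedInt. Note
// that the pattern emitted above is release-only,
//
//   MemBarRelease  MemBarCPUOrder  Store(release)  MemBarCPUOrder
//
// with no trailing acquire/volatile barrier, unlike a volatile store.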
3044 3044
3045 3045 bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
3046 3046 // Regardless of form, don't allow previous ld/st to move down,
3047 3047 // then issue acquire, release, or volatile mem_bar.
3048 3048 insert_mem_bar(Op_MemBarCPUOrder);
3049 3049 switch(id) {
3050 3050 case vmIntrinsics::_loadFence:
3051 3051 insert_mem_bar(Op_LoadFence);
3052 3052 return true;
3053 3053 case vmIntrinsics::_storeFence:
3054 3054 insert_mem_bar(Op_StoreFence);
3055 3055 return true;
3056 3056 case vmIntrinsics::_fullFence:
3057 3057 insert_mem_bar(Op_MemBarVolatile);
3058 3058 return true;
3059 3059 default:
3060 3060 fatal_unexpected_iid(id);
3061 3061 return false;
3062 3062 }
3063 3063 }
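// The three fences map roughly as follows (per the Unsafe javadoc
// semantics):
//
//   unsafe.loadFence();  // LoadLoad|LoadStore    -> Op_LoadFence
//   unsafe.storeFence(); // StoreStore|LoadStore  -> Op_StoreFence
//   unsafe.fullFence();  // adds StoreLoad        -> Op_MemBarVolatile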
3064 3064
3065 3065 bool LibraryCallKit::klass_needs_init_guard(Node* kls) {
3066 3066 if (!kls->is_Con()) {
3067 3067 return true;
3068 3068 }
3069 3069 const TypeKlassPtr* klsptr = kls->bottom_type()->isa_klassptr();
3070 3070 if (klsptr == NULL) {
3071 3071 return true;
3072 3072 }
3073 3073 ciInstanceKlass* ik = klsptr->klass()->as_instance_klass();
3074 3074 // don't need a guard for a klass that is already initialized
3075 3075 return !ik->is_initialized();
3076 3076 }
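// Illustrative elision: for unsafe.allocateInstance(Foo.class) where
// Foo.class is a compile-time constant whose klass is already
// initialized, kls is a Con and inline_unsafe_allocate below can omit
// the init-state guard entirely (test stays NULL).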
3077 3077
3078 3078 //----------------------------inline_unsafe_allocate---------------------------
3079 3079 // public native Object sun.misc.Unsafe.allocateInstance(Class<?> cls);
3080 3080 bool LibraryCallKit::inline_unsafe_allocate() {
3081 3081 if (callee()->is_static()) return false; // caller must have the capability!
3082 3082
3083 3083 null_check_receiver(); // null-check, then ignore
3084 3084 Node* cls = null_check(argument(1));
3085 3085 if (stopped()) return true;
3086 3086
3087 3087 Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
3088 3088 kls = null_check(kls);
3089 3089 if (stopped()) return true; // argument was like int.class
3090 3090
3091 3091 Node* test = NULL;
3092 3092 if (LibraryCallKit::klass_needs_init_guard(kls)) {
3093 3093 // Note: The argument might still be an illegal value like
3094 3094 // Serializable.class or Object[].class. The runtime will handle it.
3095 3095 // But we must make an explicit check for initialization.
3096 3096 Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
3097 3097 // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
3098 3098 // can generate code to load it as an unsigned byte.
3099 3099 Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
3100 3100 Node* bits = intcon(InstanceKlass::fully_initialized);
3101 3101 test = _gvn.transform(new (C) SubINode(inst, bits));
3102 3102 // The 'test' is non-zero if we need to take a slow path.
3103 3103 }
3104 3104
3105 3105 Node* obj = new_instance(kls, test);
3106 3106 set_result(obj);
3107 3107 return true;
3108 3108 }
3109 3109
3110 3110 #ifdef TRACE_HAVE_INTRINSICS
3111 3111 /*
3112 3112 * oop -> myklass
3113 3113 * myklass->trace_id |= USED
3114 3114 * return myklass->trace_id & ~0x3
3115 3115 */
3116 3116 bool LibraryCallKit::inline_native_classID() {
3117 3117 null_check_receiver(); // null-check, then ignore
3118 3118 Node* cls = null_check(argument(1), T_OBJECT);
3119 3119 Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
3120 3120 kls = null_check(kls, T_OBJECT);
3121 3121 ByteSize offset = TRACE_ID_OFFSET;
3122 3122 Node* insp = basic_plus_adr(kls, in_bytes(offset));
3123 3123 Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG, MemNode::unordered);
3124 3124 Node* bits = longcon(~0x03l); // ignore bits 0 & 1
3125 3125 Node* andl = _gvn.transform(new (C) AndLNode(tvalue, bits));
3126 3126 Node* clsused = longcon(0x01l); // set the class bit
3127 3127 Node* orl = _gvn.transform(new (C) OrLNode(tvalue, clsused));
3128 3128
3129 3129 const TypePtr *adr_type = _gvn.type(insp)->isa_ptr();
3130 3130 store_to_memory(control(), insp, orl, T_LONG, adr_type, MemNode::unordered);
3131 3131 set_result(andl);
3132 3132 return true;
3133 3133 }
3134 3134
3135 3135 bool LibraryCallKit::inline_native_threadID() {
3136 3136 Node* tls_ptr = NULL;
3137 3137 Node* cur_thr = generate_current_thread(tls_ptr);
3138 3138 Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
3139 3139 Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3140 3140 p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::thread_id_offset()));
3141 3141
3142 3142 Node* threadid = NULL;
3143 3143 size_t thread_id_size = OSThread::thread_id_size();
3144 3144 if (thread_id_size == (size_t) BytesPerLong) {
3145 3145 threadid = ConvL2I(make_load(control(), p, TypeLong::LONG, T_LONG, MemNode::unordered));
3146 3146 } else if (thread_id_size == (size_t) BytesPerInt) {
3147 3147 threadid = make_load(control(), p, TypeInt::INT, T_INT, MemNode::unordered);
3148 3148 } else {
3149 3149 ShouldNotReachHere();
3150 3150 }
3151 3151 set_result(threadid);
3152 3152 return true;
3153 3153 }
3154 3154 #endif
3155 3155
3156 3156 //------------------------inline_native_time_funcs--------------
3157 3157 // inline code for System.currentTimeMillis() and System.nanoTime()
3158 3158 // these have the same type and signature
3159 3159 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
3160 3160 const TypeFunc* tf = OptoRuntime::void_long_Type();
3161 3161 const TypePtr* no_memory_effects = NULL;
3162 3162 Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
3163 3163 Node* value = _gvn.transform(new (C) ProjNode(time, TypeFunc::Parms+0));
3164 3164 #ifdef ASSERT
3165 3165 Node* value_top = _gvn.transform(new (C) ProjNode(time, TypeFunc::Parms+1));
3166 3166 assert(value_top == top(), "second value must be top");
3167 3167 #endif
3168 3168 set_result(value);
3169 3169 return true;
3170 3170 }
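// Sketch of the result for, e.g., System.nanoTime(): a single RC_LEAF
// runtime call (leaf, no safepoint, no declared memory effects) whose
// long result is projected at TypeFunc::Parms. funcAddr/funcName come
// from the intrinsic table, typically pointing at the os:: time
// functions.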
3171 3171
3172 3172 //------------------------inline_native_currentThread------------------
3173 3173 bool LibraryCallKit::inline_native_currentThread() {
3174 3174 Node* junk = NULL;
3175 3175 set_result(generate_current_thread(junk));
3176 3176 return true;
3177 3177 }
3178 3178
3179 3179 //------------------------inline_native_isInterrupted------------------
3180 3180 // private native boolean java.lang.Thread.isInterrupted(boolean ClearInterrupted);
3181 3181 bool LibraryCallKit::inline_native_isInterrupted() {
3182 3182 // Add a fast path to t.isInterrupted(clear_int):
3183 3183 // (t == Thread.current() &&
3184 3184 // (!TLS._osthread._interrupted || WINDOWS_ONLY(false) NOT_WINDOWS(!clear_int)))
3185 3185 // ? TLS._osthread._interrupted : /*slow path:*/ t.isInterrupted(clear_int)
3186 3186 // So, in the common case that the interrupt bit is false,
3187 3187 // we avoid making a call into the VM. Even if the interrupt bit
3188 3188 // is true, if the clear_int argument is false, we avoid the VM call.
3189 3189 // However, if the receiver is not currentThread, we must call the VM,
3190 3190 // because there must be some locking done around the operation.
3191 3191
3192 3192 // We only go to the fast case code if we pass two guards.
3193 3193 // Paths which do not pass are accumulated in the slow_region.
3194 3194
3195 3195 enum {
3196 3196 no_int_result_path = 1, // t == Thread.current() && !TLS._osthread._interrupted
3197 3197 no_clear_result_path = 2, // t == Thread.current() && TLS._osthread._interrupted && !clear_int
3198 3198 slow_result_path = 3, // slow path: t.isInterrupted(clear_int)
3199 3199 PATH_LIMIT
3200 3200 };
3201 3201
3202 3202 // Ensure that it's not possible to move the load of the TLS._osthread._interrupted
3203 3203 // flag out of the function.
3204 3204 insert_mem_bar(Op_MemBarCPUOrder);
3205 3205
3206 3206 RegionNode* result_rgn = new (C) RegionNode(PATH_LIMIT);
3207 3207 PhiNode* result_val = new (C) PhiNode(result_rgn, TypeInt::BOOL);
3208 3208
3209 3209 RegionNode* slow_region = new (C) RegionNode(1);
3210 3210 record_for_igvn(slow_region);
3211 3211
3212 3212 // (a) Receiving thread must be the current thread.
3213 3213 Node* rec_thr = argument(0);
3214 3214 Node* tls_ptr = NULL;
3215 3215 Node* cur_thr = generate_current_thread(tls_ptr);
3216 3216 Node* cmp_thr = _gvn.transform(new (C) CmpPNode(cur_thr, rec_thr));
3217 3217 Node* bol_thr = _gvn.transform(new (C) BoolNode(cmp_thr, BoolTest::ne));
3218 3218
3219 3219 generate_slow_guard(bol_thr, slow_region);
3220 3220
3221 3221 // (b) Interrupt bit on TLS must be false.
3222 3222 Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
3223 3223 Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3224 3224 p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::interrupted_offset()));
3225 3225
3226 3226 // Set the control input on the field _interrupted read to prevent it from floating up.
3227 3227 Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT, MemNode::unordered);
3228 3228 Node* cmp_bit = _gvn.transform(new (C) CmpINode(int_bit, intcon(0)));
3229 3229 Node* bol_bit = _gvn.transform(new (C) BoolNode(cmp_bit, BoolTest::ne));
3230 3230
3231 3231 IfNode* iff_bit = create_and_map_if(control(), bol_bit, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
3232 3232
3233 3233 // First fast path: if (!TLS._interrupted) return false;
3234 3234 Node* false_bit = _gvn.transform(new (C) IfFalseNode(iff_bit));
3235 3235 result_rgn->init_req(no_int_result_path, false_bit);
3236 3236 result_val->init_req(no_int_result_path, intcon(0));
3237 3237
3238 3238 // drop through to next case
3239 3239 set_control( _gvn.transform(new (C) IfTrueNode(iff_bit)));
3240 3240
3241 3241 #ifndef TARGET_OS_FAMILY_windows
3242 3242 // (c) Or, if interrupt bit is set and clear_int is false, use 2nd fast path.
3243 3243 Node* clr_arg = argument(1);
3244 3244 Node* cmp_arg = _gvn.transform(new (C) CmpINode(clr_arg, intcon(0)));
3245 3245 Node* bol_arg = _gvn.transform(new (C) BoolNode(cmp_arg, BoolTest::ne));
3246 3246 IfNode* iff_arg = create_and_map_if(control(), bol_arg, PROB_FAIR, COUNT_UNKNOWN);
3247 3247
3248 3248 // Second fast path: ... else if (!clear_int) return true;
3249 3249 Node* false_arg = _gvn.transform(new (C) IfFalseNode(iff_arg));
3250 3250 result_rgn->init_req(no_clear_result_path, false_arg);
3251 3251 result_val->init_req(no_clear_result_path, intcon(1));
3252 3252
3253 3253 // drop through to next case
3254 3254 set_control( _gvn.transform(new (C) IfTrueNode(iff_arg)));
3255 3255 #else
3256 3256 // To return true on Windows you must read the _interrupted field
3257 3257 // and check the event state, i.e., take the slow path.
3258 3258 #endif // TARGET_OS_FAMILY_windows
3259 3259
3260 3260 // (d) Otherwise, go to the slow path.
3261 3261 slow_region->add_req(control());
3262 3262 set_control( _gvn.transform(slow_region));
3263 3263
3264 3264 if (stopped()) {
3265 3265 // There is no slow path.
3266 3266 result_rgn->init_req(slow_result_path, top());
3267 3267 result_val->init_req(slow_result_path, top());
3268 3268 } else {
3269 3269 // non-virtual because it is a private non-static
3270 3270 CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_isInterrupted);
3271 3271
3272 3272 Node* slow_val = set_results_for_java_call(slow_call);
3273 3273 // this->control() comes from set_results_for_java_call
3274 3274
3275 3275 Node* fast_io = slow_call->in(TypeFunc::I_O);
3276 3276 Node* fast_mem = slow_call->in(TypeFunc::Memory);
3277 3277
3278 3278 // These two phis are pre-filled with copies of the fast IO and Memory
3279 3279 PhiNode* result_mem = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);
3280 3280 PhiNode* result_io = PhiNode::make(result_rgn, fast_io, Type::ABIO);
3281 3281
3282 3282 result_rgn->init_req(slow_result_path, control());
3283 3283 result_io ->init_req(slow_result_path, i_o());
3284 3284 result_mem->init_req(slow_result_path, reset_memory());
3285 3285 result_val->init_req(slow_result_path, slow_val);
3286 3286
3287 3287 set_all_memory(_gvn.transform(result_mem));
3288 3288 set_i_o( _gvn.transform(result_io));
3289 3289 }
3290 3290
3291 3291 C->set_has_split_ifs(true); // Has chance for split-if optimization
3292 3292 set_result(result_rgn, result_val);
3293 3293 return true;
3294 3294 }
3295 3295
3296 3296 //---------------------------load_mirror_from_klass----------------------------
3297 3297 // Given a klass oop, load its java mirror (a java.lang.Class oop).
3298 3298 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
3299 3299 Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
3300 3300 return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);
3301 3301 }
3302 3302
3303 3303 //-----------------------load_klass_from_mirror_common-------------------------
3304 3304 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3305 3305 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3306 3306 // and branch to the given path on the region.
3307 3307 // If never_see_null, take an uncommon trap on null, so we can optimistically
3308 3308 // compile for the non-null case.
3309 3309 // If the region is NULL, force never_see_null = true.
3310 3310 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3311 3311 bool never_see_null,
3312 3312 RegionNode* region,
3313 3313 int null_path,
3314 3314 int offset) {
3315 3315 if (region == NULL) never_see_null = true;
3316 3316 Node* p = basic_plus_adr(mirror, offset);
3317 3317 const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
3318 3318 Node* kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3319 3319 Node* null_ctl = top();
3320 3320 kls = null_check_oop(kls, &null_ctl, never_see_null);
3321 3321 if (region != NULL) {
3322 3322 // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).
3323 3323 region->init_req(null_path, null_ctl);
3324 3324 } else {
3325 3325 assert(null_ctl == top(), "no loose ends");
3326 3326 }
3327 3327 return kls;
3328 3328 }
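// Illustrative behavior of the helper above: a primitive mirror such as
// Integer.TYPE carries a null klass pointer, so control is routed to
// region->in(null_path) (or to an uncommon trap when never_see_null);
// a loaded reference type falls through with its non-null Klass*.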
3329 3329
3330 3330 //--------------------(inline_native_Class_query helpers)---------------------
3331 3331 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE, JVM_ACC_HAS_FINALIZER.
3332 3332 // Fall through if (mods & mask) == bits, take the guard otherwise.
3333 3333 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3334 3334 // Branch around if the given klass has the given modifier bit set.
3335 3335 // Like generate_guard, adds a new path onto the region.
3336 3336 Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3337 3337 Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT, MemNode::unordered);
3338 3338 Node* mask = intcon(modifier_mask);
3339 3339 Node* bits = intcon(modifier_bits);
3340 3340 Node* mbit = _gvn.transform(new (C) AndINode(mods, mask));
3341 3341 Node* cmp = _gvn.transform(new (C) CmpINode(mbit, bits));
3342 3342 Node* bol = _gvn.transform(new (C) BoolNode(cmp, BoolTest::ne));
3343 3343 return generate_fair_guard(bol, region);
3344 3344 }
3345 3345 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3346 3346 return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3347 3347 }
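// Worked example of the mask/bits convention: generate_interface_guard
// passes mask == JVM_ACC_INTERFACE and bits == 0, so the fall-through
// path means "not an interface" and the returned guard edge is taken
// exactly when the JVM_ACC_INTERFACE bit is set in the access flags.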
3348 3348
3349 3349 //-------------------------inline_native_Class_query-------------------
3350 3350 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3351 3351 const Type* return_type = TypeInt::BOOL;
3352 3352 Node* prim_return_value = top(); // what happens if it's a primitive class?
3353 3353 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3354 3354 bool expect_prim = false; // most of these guys expect to work on refs
3355 3355
3356 3356 enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3357 3357
3358 3358 Node* mirror = argument(0);
3359 3359 Node* obj = top();
3360 3360
3361 3361 switch (id) {
3362 3362 case vmIntrinsics::_isInstance:
3363 3363 // nothing is an instance of a primitive type
3364 3364 prim_return_value = intcon(0);
3365 3365 obj = argument(1);
3366 3366 break;
3367 3367 case vmIntrinsics::_getModifiers:
3368 3368 prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
3369 3369 assert(is_power_of_2((int)JVM_ACC_WRITTEN_FLAGS+1), "change next line");
3370 3370 return_type = TypeInt::make(0, JVM_ACC_WRITTEN_FLAGS, Type::WidenMin);
3371 3371 break;
3372 3372 case vmIntrinsics::_isInterface:
3373 3373 prim_return_value = intcon(0);
3374 3374 break;
3375 3375 case vmIntrinsics::_isArray:
3376 3376 prim_return_value = intcon(0);
3377 3377 expect_prim = true; // cf. ObjectStreamClass.getClassSignature
3378 3378 break;
3379 3379 case vmIntrinsics::_isPrimitive:
3380 3380 prim_return_value = intcon(1);
3381 3381 expect_prim = true; // obviously
3382 3382 break;
3383 3383 case vmIntrinsics::_getSuperclass:
3384 3384 prim_return_value = null();
3385 3385 return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);
3386 3386 break;
3387 3387 case vmIntrinsics::_getComponentType:
3388 3388 prim_return_value = null();
3389 3389 return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);
3390 3390 break;
3391 3391 case vmIntrinsics::_getClassAccessFlags:
3392 3392 prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
3393 3393 return_type = TypeInt::INT; // not bool! 6297094
3394 3394 break;
3395 3395 default:
3396 3396 fatal_unexpected_iid(id);
3397 3397 break;
3398 3398 }
3399 3399
3400 3400 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3401 3401 if (mirror_con == NULL) return false; // cannot happen?
3402 3402
3403 3403 #ifndef PRODUCT
3404 3404 if (C->print_intrinsics() || C->print_inlining()) {
3405 3405 ciType* k = mirror_con->java_mirror_type();
3406 3406 if (k) {
3407 3407 tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
3408 3408 k->print_name();
3409 3409 tty->cr();
3410 3410 }
3411 3411 }
3412 3412 #endif
3413 3413
3414 3414 // Null-check the mirror, and the mirror's klass ptr (in case it is a primitive).
3415 3415 RegionNode* region = new (C) RegionNode(PATH_LIMIT);
3416 3416 record_for_igvn(region);
3417 3417 PhiNode* phi = new (C) PhiNode(region, return_type);
3418 3418
3419 3419 // The mirror will never be null for Reflection.getClassAccessFlags; however,
3420 3420 // it may be null for Class.isInstance or Class.getModifiers. Throw an NPE
3421 3421 // if it is. See bug 4774291.
3422 3422
3423 3423 // For Reflection.getClassAccessFlags(), the null check occurs in
3424 3424 // the wrong place; see inline_unsafe_access(), above, for a similar
3425 3425 // situation.
3426 3426 mirror = null_check(mirror);
3427 3427 // If mirror or obj is dead, only null-path is taken.
3428 3428 if (stopped()) return true;
3429 3429
3430 3430 if (expect_prim) never_see_null = false; // expect nulls (meaning prims)
3431 3431
3432 3432 // Now load the mirror's klass metaobject, and null-check it.
3433 3433 // Side-effects region with the control path if the klass is null.
3434 3434 Node* kls = load_klass_from_mirror(mirror, never_see_null, region, _prim_path);
3435 3435 // If kls is null, we have a primitive mirror.
3436 3436 phi->init_req(_prim_path, prim_return_value);
3437 3437 if (stopped()) { set_result(region, phi); return true; }
3438 3438 bool safe_for_replace = (region->in(_prim_path) == top());
3439 3439
3440 3440 Node* p; // handy temp
3441 3441 Node* null_ctl;
3442 3442
3443 3443 // Now that we have the non-null klass, we can perform the real query.
3444 3444 // For constant classes, the query will constant-fold in LoadNode::Value.
3445 3445 Node* query_value = top();
3446 3446 switch (id) {
3447 3447 case vmIntrinsics::_isInstance:
3448 3448 // nothing is an instance of a primitive type
3449 3449 query_value = gen_instanceof(obj, kls, safe_for_replace);
3450 3450 break;
3451 3451
3452 3452 case vmIntrinsics::_getModifiers:
3453 3453 p = basic_plus_adr(kls, in_bytes(Klass::modifier_flags_offset()));
3454 3454 query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
3455 3455 break;
3456 3456
3457 3457 case vmIntrinsics::_isInterface:
3458 3458 // (To verify this code sequence, check the asserts in JVM_IsInterface.)
3459 3459 if (generate_interface_guard(kls, region) != NULL)
3460 3460 // A guard was added. If the guard is taken, it was an interface.
3461 3461 phi->add_req(intcon(1));
3462 3462 // If we fall through, it's a plain class.
3463 3463 query_value = intcon(0);
3464 3464 break;
3465 3465
3466 3466 case vmIntrinsics::_isArray:
3467 3467 // (To verify this code sequence, check the asserts in JVM_IsArrayClass.)
3468 3468 if (generate_array_guard(kls, region) != NULL)
3469 3469 // A guard was added. If the guard is taken, it was an array.
3470 3470 phi->add_req(intcon(1));
3471 3471 // If we fall through, it's a plain class.
3472 3472 query_value = intcon(0);
3473 3473 break;
3474 3474
3475 3475 case vmIntrinsics::_isPrimitive:
3476 3476 query_value = intcon(0); // "normal" path produces false
3477 3477 break;
3478 3478
3479 3479 case vmIntrinsics::_getSuperclass:
3480 3480 // The rules here are somewhat unfortunate, but we can still do better
3481 3481 // with random logic than with a JNI call.
3482 3482 // Interfaces store null or Object as _super, but must report null.
3483 3483 // Arrays store an intermediate super as _super, but must report Object.
3484 3484 // Other types can report the actual _super.
3485 3485 // (To verify this code sequence, check the asserts in JVM_IsInterface.)
3486 3486 if (generate_interface_guard(kls, region) != NULL)
3487 3487 // A guard was added. If the guard is taken, it was an interface.
3488 3488 phi->add_req(null());
3489 3489 if (generate_array_guard(kls, region) != NULL)
3490 3490 // A guard was added. If the guard is taken, it was an array.
3491 3491 phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
3492 3492 // If we fall through, it's a plain class. Get its _super.
3493 3493 p = basic_plus_adr(kls, in_bytes(Klass::super_offset()));
3494 3494 kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));
3495 3495 null_ctl = top();
3496 3496 kls = null_check_oop(kls, &null_ctl);
3497 3497 if (null_ctl != top()) {
3498 3498 // If the guard is taken, Object.superClass is null (both klass and mirror).
3499 3499 region->add_req(null_ctl);
3500 3500 phi ->add_req(null());
3501 3501 }
3502 3502 if (!stopped()) {
3503 3503 query_value = load_mirror_from_klass(kls);
3504 3504 }
3505 3505 break;
3506 3506
3507 3507 case vmIntrinsics::_getComponentType:
3508 3508 if (generate_array_guard(kls, region) != NULL) {
3509 3509 // Be sure to pin the oop load to the guard edge just created:
3510 3510 Node* is_array_ctrl = region->in(region->req()-1);
3511 3511 Node* cma = basic_plus_adr(kls, in_bytes(ArrayKlass::component_mirror_offset()));
3512 3512 Node* cmo = make_load(is_array_ctrl, cma, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);
3513 3513 phi->add_req(cmo);
3514 3514 }
3515 3515 query_value = null(); // non-array case is null
3516 3516 break;
3517 3517
3518 3518 case vmIntrinsics::_getClassAccessFlags:
3519 3519 p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3520 3520 query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
3521 3521 break;
3522 3522
3523 3523 default:
3524 3524 fatal_unexpected_iid(id);
3525 3525 break;
3526 3526 }
3527 3527
3528 3528 // Fall-through is the normal case of a query to a real class.
3529 3529 phi->init_req(1, query_value);
3530 3530 region->init_req(1, control());
3531 3531
3532 3532 C->set_has_split_ifs(true); // Has chance for split-if optimization
3533 3533 set_result(region, phi);
3534 3534 return true;
3535 3535 }
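// Illustrative results for the _getSuperclass cases handled above:
//
//   Runnable.class.getSuperclass() -> null    (interface guard)
//   int[].class.getSuperclass()    -> Object  (array guard)
//   Integer.class.getSuperclass()  -> Number  (plain class, loads _super)
//   int.class.getSuperclass()      -> null    (_prim_path)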
3536 3536
3537 3537 //--------------------------inline_native_subtype_check------------------------
3538 3538 // This intrinsic takes the JNI calls out of the heart of
3539 3539 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
3540 3540 bool LibraryCallKit::inline_native_subtype_check() {
3541 3541 // Pull both arguments off the stack.
3542 3542 Node* args[2]; // two java.lang.Class mirrors: superc, subc
3543 3543 args[0] = argument(0);
3544 3544 args[1] = argument(1);
3545 3545 Node* klasses[2]; // corresponding Klasses: superk, subk
3546 3546 klasses[0] = klasses[1] = top();
3547 3547
3548 3548 enum {
3549 3549 // A full decision tree on {superc is prim, subc is prim}:
3550 3550 _prim_0_path = 1, // {P,N} => false
3551 3551 // {P,P} & superc!=subc => false
3552 3552 _prim_same_path, // {P,P} & superc==subc => true
3553 3553 _prim_1_path, // {N,P} => false
3554 3554 _ref_subtype_path, // {N,N} & subtype check wins => true
3555 3555 _both_ref_path, // {N,N} & subtype check loses => false
3556 3556 PATH_LIMIT
3557 3557 };
3558 3558
3559 3559 RegionNode* region = new (C) RegionNode(PATH_LIMIT);
3560 3560 Node* phi = new (C) PhiNode(region, TypeInt::BOOL);
3561 3561 record_for_igvn(region);
3562 3562
3563 3563 const TypePtr* adr_type = TypeRawPtr::BOTTOM; // memory type of loads
3564 3564 const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
3565 3565 int class_klass_offset = java_lang_Class::klass_offset_in_bytes();
3566 3566
3567 3567 // First null-check both mirrors and load each mirror's klass metaobject.
3568 3568 int which_arg;
3569 3569 for (which_arg = 0; which_arg <= 1; which_arg++) {
3570 3570 Node* arg = args[which_arg];
3571 3571 arg = null_check(arg);
3572 3572 if (stopped()) break;
3573 3573 args[which_arg] = arg;
3574 3574
3575 3575 Node* p = basic_plus_adr(arg, class_klass_offset);
3576 3576 Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type);
3577 3577 klasses[which_arg] = _gvn.transform(kls);
3578 3578 }
3579 3579
3580 3580 // Having loaded both klasses, test each for null.
3581 3581 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3582 3582 for (which_arg = 0; which_arg <= 1; which_arg++) {
3583 3583 Node* kls = klasses[which_arg];
3584 3584 Node* null_ctl = top();
3585 3585 kls = null_check_oop(kls, &null_ctl, never_see_null);
3586 3586 int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
3587 3587 region->init_req(prim_path, null_ctl);
3588 3588 if (stopped()) break;
3589 3589 klasses[which_arg] = kls;
3590 3590 }
3591 3591
3592 3592 if (!stopped()) {
3593 3593 // now we have two reference types, in klasses[0..1]
3594 3594 Node* subk = klasses[1]; // the argument to isAssignableFrom
3595 3595 Node* superk = klasses[0]; // the receiver
3596 3596 region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
3597 3597 // now we have a successful reference subtype check
3598 3598 region->set_req(_ref_subtype_path, control());
3599 3599 }
3600 3600
3601 3601 // If both operands are primitive (both klasses null), then
3602 3602 // we must return true when they are identical primitives.
3603 3603 // It is convenient to test this after the first null klass check.
3604 3604 set_control(region->in(_prim_0_path)); // go back to first null check
3605 3605 if (!stopped()) {
3606 3606 // Since superc is primitive, make a guard for the superc==subc case.
3607 3607 Node* cmp_eq = _gvn.transform(new (C) CmpPNode(args[0], args[1]));
3608 3608 Node* bol_eq = _gvn.transform(new (C) BoolNode(cmp_eq, BoolTest::eq));
3609 3609 generate_guard(bol_eq, region, PROB_FAIR);
3610 3610 if (region->req() == PATH_LIMIT+1) {
3611 3611 // A guard was added. If the added guard is taken, superc==subc.
3612 3612 region->swap_edges(PATH_LIMIT, _prim_same_path);
3613 3613 region->del_req(PATH_LIMIT);
3614 3614 }
3615 3615 region->set_req(_prim_0_path, control()); // Not equal after all.
3616 3616 }
3617 3617
3618 3618 // these are the only paths that produce 'true':
3619 3619 phi->set_req(_prim_same_path, intcon(1));
3620 3620 phi->set_req(_ref_subtype_path, intcon(1));
3621 3621
3622 3622 // pull together the cases:
3623 3623 assert(region->req() == PATH_LIMIT, "sane region");
3624 3624 for (uint i = 1; i < region->req(); i++) {
3625 3625 Node* ctl = region->in(i);
3626 3626 if (ctl == NULL || ctl == top()) {
3627 3627 region->set_req(i, top());
3628 3628 phi ->set_req(i, top());
3629 3629 } else if (phi->in(i) == NULL) {
3630 3630 phi->set_req(i, intcon(0)); // all other paths produce 'false'
3631 3631 }
3632 3632 }
3633 3633
3634 3634 set_control(_gvn.transform(region));
3635 3635 set_result(_gvn.transform(phi));
3636 3636 return true;
3637 3637 }
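// Decision-tree examples for isAssignableFrom (illustrative):
//
//   int.class.isAssignableFrom(int.class)        -> true  (_prim_same_path)
//   int.class.isAssignableFrom(long.class)       -> false (_prim_0_path)
//   Number.class.isAssignableFrom(int.class)     -> false (_prim_1_path)
//   Number.class.isAssignableFrom(Integer.class) -> true  (_ref_subtype_path)
//   Number.class.isAssignableFrom(String.class)  -> false (_both_ref_path)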
3638 3638
3639 3639 //---------------------generate_array_guard_common------------------------
3640 3640 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
3641 3641 bool obj_array, bool not_array) {
3642 3642 // If obj_array/not_array==false/false:
3643 3643 // Branch around if the given klass is in fact an array (either obj or prim).
3644 3644 // If obj_array/not_array==false/true:
3645 3645 // Branch around if the given klass is not an array klass of any kind.
3646 3646 // If obj_array/not_array==true/true:
3647 3647 // Branch around if the kls is not an oop array (kls is int[], String, etc.)
3648 3648 // If obj_array/not_array==true/false:
3649 3649 // Branch around if the kls is an oop array (Object[] or subtype)
3650 3650 //
3651 3651 // Like generate_guard, adds a new path onto the region.
3652 3652 jint layout_con = 0;
3653 3653 Node* layout_val = get_layout_helper(kls, layout_con);
3654 3654 if (layout_val == NULL) {
3655 3655 bool query = (obj_array
3656 3656 ? Klass::layout_helper_is_objArray(layout_con)
3657 3657 : Klass::layout_helper_is_array(layout_con));
3658 3658 if (query == not_array) {
3659 3659 return NULL; // never a branch
3660 3660 } else { // always a branch
3661 3661 Node* always_branch = control();
3662 3662 if (region != NULL)
3663 3663 region->add_req(always_branch);
3664 3664 set_control(top());
3665 3665 return always_branch;
3666 3666 }
3667 3667 }
3668 3668 // Now test the correct condition.
3669 3669 jint nval = (obj_array
3670 3670 ? ((jint)Klass::_lh_array_tag_type_value
3671 3671 << Klass::_lh_array_tag_shift)
3672 3672 : Klass::_lh_neutral_value);
3673 3673 Node* cmp = _gvn.transform(new(C) CmpINode(layout_val, intcon(nval)));
3674 3674 BoolTest::mask btest = BoolTest::lt; // correct for testing is_[obj]array
3675 3675 // invert the test if we are looking for a non-array
3676 3676 if (not_array) btest = BoolTest(btest).negate();
3677 3677 Node* bol = _gvn.transform(new(C) BoolNode(cmp, btest));
3678 3678 return generate_fair_guard(bol, region);
3679 3679 }
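// For reference, the four flag combinations above are wrapped by the thin
// helpers used elsewhere in this file (generate_array_guard,
// generate_non_array_guard, generate_objArray_guard,
// generate_non_objArray_guard):
//   obj_array=false, not_array=false -> generate_array_guard
//   obj_array=false, not_array=true  -> generate_non_array_guard
//   obj_array=true,  not_array=false -> generate_objArray_guard
//   obj_array=true,  not_array=true  -> generate_non_objArray_guard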
3680 3680
3681 3681
3682 3682 //-----------------------inline_native_newArray--------------------------
3683 3683 // private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
3684 3684 bool LibraryCallKit::inline_native_newArray() {
3685 3685 Node* mirror = argument(0);
3686 3686 Node* count_val = argument(1);
3687 3687
3688 3688 mirror = null_check(mirror);
3689 3689 // If mirror is dead, only null-path is taken.
3690 3690 if (stopped()) return true;
3691 3691
3692 3692 enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
3693 3693 RegionNode* result_reg = new(C) RegionNode(PATH_LIMIT);
3694 3694 PhiNode* result_val = new(C) PhiNode(result_reg,
3695 3695 TypeInstPtr::NOTNULL);
3696 3696 PhiNode* result_io = new(C) PhiNode(result_reg, Type::ABIO);
3697 3697 PhiNode* result_mem = new(C) PhiNode(result_reg, Type::MEMORY,
3698 3698 TypePtr::BOTTOM);
3699 3699
3700 3700 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3701 3701 Node* klass_node = load_array_klass_from_mirror(mirror, never_see_null,
3702 3702 result_reg, _slow_path);
3703 3703 Node* normal_ctl = control();
3704 3704 Node* no_array_ctl = result_reg->in(_slow_path);
3705 3705
3706 3706 // Generate code for the slow case. We make a call to newArray().
3707 3707 set_control(no_array_ctl);
3708 3708 if (!stopped()) {
3709 3709 // Either the input type is void.class, or else the
3710 3710 // array klass has not yet been cached. Either the
3711 3711 // ensuing call will throw an exception, or else it
3712 3712 // will cache the array klass for next time.
3713 3713 PreserveJVMState pjvms(this);
3714 3714 CallJavaNode* slow_call = generate_method_call_static(vmIntrinsics::_newArray);
3715 3715 Node* slow_result = set_results_for_java_call(slow_call);
3716 3716 // this->control() comes from set_results_for_java_call
3717 3717 result_reg->set_req(_slow_path, control());
3718 3718 result_val->set_req(_slow_path, slow_result);
3719 3719 result_io ->set_req(_slow_path, i_o());
3720 3720 result_mem->set_req(_slow_path, reset_memory());
3721 3721 }
3722 3722
3723 3723 set_control(normal_ctl);
3724 3724 if (!stopped()) {
3725 3725 // Normal case: The array type has been cached in the java.lang.Class.
3726 3726 // The following call works fine even if the array type is polymorphic.
3727 3727 // It could be a dynamic mix of int[], boolean[], Object[], etc.
3728 3728 Node* obj = new_array(klass_node, count_val, 0); // no arguments to push
3729 3729 result_reg->init_req(_normal_path, control());
3730 3730 result_val->init_req(_normal_path, obj);
3731 3731 result_io ->init_req(_normal_path, i_o());
3732 3732 result_mem->init_req(_normal_path, reset_memory());
3733 3733 }
3734 3734
3735 3735 // Return the combined state.
3736 3736 set_i_o( _gvn.transform(result_io) );
3737 3737 set_all_memory( _gvn.transform(result_mem));
3738 3738
3739 3739 C->set_has_split_ifs(true); // Has chance for split-if optimization
3740 3740 set_result(result_reg, result_val);
3741 3741 return true;
3742 3742 }
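// Java-level sketch of the two paths (illustrative; this intrinsic is reached
// via java.lang.reflect.Array.newInstance, which calls the native newArray):
//   Array.newInstance(String.class, 3); // normal path once String[] is cached
//   Array.newInstance(void.class, 3);   // slow path; the out-of-line newArray
//                                       // call throws IllegalArgumentException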
3743 3743
3744 3744 //----------------------inline_native_getLength--------------------------
3745 3745 // public static native int java.lang.reflect.Array.getLength(Object array);
3746 3746 bool LibraryCallKit::inline_native_getLength() {
3747 3747 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false;
3748 3748
3749 3749 Node* array = null_check(argument(0));
3750 3750 // If array is dead, only null-path is taken.
3751 3751 if (stopped()) return true;
3752 3752
3753 3753 // Deoptimize if it is a non-array.
3754 3754 Node* non_array = generate_non_array_guard(load_object_klass(array), NULL);
3755 3755
3756 3756 if (non_array != NULL) {
3757 3757 PreserveJVMState pjvms(this);
3758 3758 set_control(non_array);
3759 3759 uncommon_trap(Deoptimization::Reason_intrinsic,
3760 3760 Deoptimization::Action_maybe_recompile);
3761 3761 }
3762 3762
3763 3763 // If control is dead, only non-array-path is taken.
3764 3764 if (stopped()) return true;
3765 3765
3766 3766 // This works fine even if the array type is polymorphic.
3767 3767 // It could be a dynamic mix of int[], boolean[], Object[], etc.
3768 3768 Node* result = load_array_length(array);
3769 3769
3770 3770 C->set_has_split_ifs(true); // Has chance for split-if optimization
3771 3771 set_result(result);
3772 3772 return true;
3773 3773 }
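// Illustrative Java-level behavior (a sketch):
//   Array.getLength(new int[5]); // 5, via the inlined length load
//   Array.getLength("x");        // non-array: the guard traps, and the
//                                // interpreted call throws IllegalArgumentException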
3774 3774
3775 3775 //------------------------inline_array_copyOf----------------------------
3776 3776 // public static <T,U> T[] java.util.Arrays.copyOf( U[] original, int newLength, Class<? extends T[]> newType);
3777 3777 // public static <T,U> T[] java.util.Arrays.copyOfRange(U[] original, int from, int to, Class<? extends T[]> newType);
3778 3778 bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
3779 3779 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false;
3780 3780
3781 3781 // Get the arguments.
3782 3782 Node* original = argument(0);
3783 3783 Node* start = is_copyOfRange? argument(1): intcon(0);
3784 3784 Node* end = is_copyOfRange? argument(2): argument(1);
3785 3785 Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);
3786 3786
3787 3787 Node* newcopy;
3788 3788
3789 3789 // Set the original stack and the reexecute bit for the interpreter to reexecute
3790 3790 // the bytecode that invokes Arrays.copyOf if deoptimization happens.
3791 3791 { PreserveReexecuteState preexecs(this);
3792 3792 jvms()->set_should_reexecute(true);
3793 3793
3794 3794 array_type_mirror = null_check(array_type_mirror);
3795 3795 original = null_check(original);
3796 3796
3797 3797 // Check if a null path was taken unconditionally.
3798 3798 if (stopped()) return true;
3799 3799
3800 3800 Node* orig_length = load_array_length(original);
3801 3801
3802 3802 Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0);
3803 3803 klass_node = null_check(klass_node);
3804 3804
3805 3805 RegionNode* bailout = new (C) RegionNode(1);
3806 3806 record_for_igvn(bailout);
3807 3807
3808 3808 // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
3809 3809 // Bail out if that is so.
3810 3810 Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
3811 3811 if (not_objArray != NULL) {
3812 3812 // Improve the klass node's type from the new optimistic assumption:
3813 3813 ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
3814 3814 const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
3815 3815 Node* cast = new (C) CastPPNode(klass_node, akls);
3816 3816 cast->init_req(0, control());
3817 3817 klass_node = _gvn.transform(cast);
3818 3818 }
3819 3819
3820 3820 // Bail out if either start or end is negative.
3821 3821 generate_negative_guard(start, bailout, &start);
3822 3822 generate_negative_guard(end, bailout, &end);
3823 3823
3824 3824 Node* length = end;
3825 3825 if (_gvn.type(start) != TypeInt::ZERO) {
3826 3826 length = _gvn.transform(new (C) SubINode(end, start));
3827 3827 }
3828 3828
3829 3829 // Bail out if length is negative.
3830 3830 // Without this the new_array would throw
3831 3831 // NegativeArraySizeException, but IllegalArgumentException is what
3832 3832 // should be thrown.
3833 3833 generate_negative_guard(length, bailout, &length);
3834 3834
3835 3835 if (bailout->req() > 1) {
3836 3836 PreserveJVMState pjvms(this);
3837 3837 set_control(_gvn.transform(bailout));
3838 3838 uncommon_trap(Deoptimization::Reason_intrinsic,
3839 3839 Deoptimization::Action_maybe_recompile);
3840 3840 }
3841 3841
3842 3842 if (!stopped()) {
3843 3843 // How many elements will we copy from the original?
3844 3844 // The answer is MinI(orig_length - start, length).
3845 3845 Node* orig_tail = _gvn.transform(new (C) SubINode(orig_length, start));
3846 3846 Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
3847 3847
3848 3848 newcopy = new_array(klass_node, length, 0); // no arguments to push
3849 3849
3850 3850 // Generate a direct call to the right arraycopy function(s).
3851 3851 // We know the copy is disjoint but we might not know if the
3852 3852 // oop stores need checking.
3853 3853 // Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class).
3854 3854 // This will fail a store-check if x contains any non-nulls.
3855 3855 bool disjoint_bases = true;
3856 3856 // if start > orig_length then the length of the copy may be
3857 3857 // negative.
3858 3858 bool length_never_negative = !is_copyOfRange;
3859 3859 generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
3860 3860 original, start, newcopy, intcon(0), moved,
3861 3861 disjoint_bases, length_never_negative);
3862 3862 }
3863 3863 } // original reexecute is set back here
3864 3864
3865 3865 C->set_has_split_ifs(true); // Has chance for split-if optimization
3866 3866 if (!stopped()) {
3867 3867 set_result(newcopy);
3868 3868 }
3869 3869 return true;
3870 3870 }
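// Illustrative Java-level cases (a sketch; the last line is the store-check
// example from the comment above):
//   Integer[] x = { 1, 2, 3 };
//   Number[] a = Arrays.copyOf(x, 2, Number[].class); // fast path
//   Arrays.copyOfRange(x, 2, 1, Number[].class);      // negative length =>
//                                                     // IllegalArgumentException
//   Arrays.copyOf(x, 10, String[].class);             // ArrayStoreException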
3871 3871
3872 3872
3873 3873 //----------------------generate_virtual_guard---------------------------
3874 3874 // Helper for hashCode and clone. Peeks inside the vtable to avoid a call.
3875 3875 Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass,
3876 3876 RegionNode* slow_region) {
3877 3877 ciMethod* method = callee();
3878 3878 int vtable_index = method->vtable_index();
3879 3879 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
3880 3880 err_msg_res("bad index %d", vtable_index));
3881 3881 // Get the Method* out of the appropriate vtable entry.
3882 3882 int entry_offset = (InstanceKlass::vtable_start_offset() +
3883 3883 vtable_index*vtableEntry::size()) * wordSize +
3884 3884 vtableEntry::method_offset_in_bytes();
3885 3885 Node* entry_addr = basic_plus_adr(obj_klass, entry_offset);
3886 3886 Node* target_call = make_load(NULL, entry_addr, TypePtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3887 3887
3888 3888 // Compare the target method with the expected method (e.g., Object.hashCode).
3889 3889 const TypePtr* native_call_addr = TypeMetadataPtr::make(method);
3890 3890
3891 3891 Node* native_call = makecon(native_call_addr);
3892 3892 Node* chk_native = _gvn.transform(new(C) CmpPNode(target_call, native_call));
3893 3893 Node* test_native = _gvn.transform(new(C) BoolNode(chk_native, BoolTest::ne));
3894 3894
3895 3895 return generate_slow_guard(test_native, slow_region);
3896 3896 }
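// In pseudocode, the guard built above is roughly (a sketch):
//   Method* target = obj_klass->vtable()[vtable_index].method();
//   if (target != callee()) goto slow_region; // method was overridden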
3897 3897
3898 3898 //-----------------------generate_method_call----------------------------
3899 3899 // Use generate_method_call to make a slow-call to the real
3900 3900 // method if the fast path fails. An alternative would be to
3901 3901 // use a stub like OptoRuntime::slow_arraycopy_Java.
3902 3902 // This only works for expanding the current library call,
3903 3903 // not another intrinsic. (E.g., don't use this for making an
3904 3904 // arraycopy call inside of the copyOf intrinsic.)
3905 3905 CallJavaNode*
3906 3906 LibraryCallKit::generate_method_call(vmIntrinsics::ID method_id, bool is_virtual, bool is_static) {
3907 3907 // When compiling the intrinsic method itself, do not use this technique.
3908 3908 guarantee(callee() != C->method(), "cannot make slow-call to self");
3909 3909
3910 3910 ciMethod* method = callee();
3911 3911 // ensure the JVMS we have will be correct for this call
3912 3912 guarantee(method_id == method->intrinsic_id(), "must match");
3913 3913
3914 3914 const TypeFunc* tf = TypeFunc::make(method);
3915 3915 CallJavaNode* slow_call;
3916 3916 if (is_static) {
3917 3917 assert(!is_virtual, "");
3918 3918 slow_call = new(C) CallStaticJavaNode(C, tf,
3919 3919 SharedRuntime::get_resolve_static_call_stub(),
3920 3920 method, bci());
3921 3921 } else if (is_virtual) {
3922 3922 null_check_receiver();
3923 3923 int vtable_index = Method::invalid_vtable_index;
3924 3924 if (UseInlineCaches) {
3925 3925 // Suppress the vtable call
3926 3926 } else {
3927 3927 // hashCode and clone are not miranda methods,
3928 3928 // so the vtable index is fixed.
3929 3929 // No need to use the linkResolver to get it.
3930 3930 vtable_index = method->vtable_index();
3931 3931 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
3932 3932 err_msg_res("bad index %d", vtable_index));
3933 3933 }
3934 3934 slow_call = new(C) CallDynamicJavaNode(tf,
3935 3935 SharedRuntime::get_resolve_virtual_call_stub(),
3936 3936 method, vtable_index, bci());
3937 3937 } else { // neither virtual nor static: opt_virtual
3938 3938 null_check_receiver();
3939 3939 slow_call = new(C) CallStaticJavaNode(C, tf,
3940 3940 SharedRuntime::get_resolve_opt_virtual_call_stub(),
3941 3941 method, bci());
3942 3942 slow_call->set_optimized_virtual(true);
3943 3943 }
3944 3944 set_arguments_for_java_call(slow_call);
3945 3945 set_edges_for_java_call(slow_call);
3946 3946 return slow_call;
3947 3947 }
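// Dispatch selection above, summarized (a sketch):
//   is_static             -> CallStaticJavaNode via resolve_static_call stub
//   is_virtual            -> CallDynamicJavaNode via resolve_virtual_call stub
//   neither (opt_virtual) -> CallStaticJavaNode via resolve_opt_virtual_call
//                            stub, marked optimized_virtual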
3948 3948
3949 3949
3950 3950 //------------------------------inline_native_hashcode--------------------
3951 3951 // Build special case code for calls to hashCode on an object.
3952 3952 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
3953 3953 assert(is_static == callee()->is_static(), "correct intrinsic selection");
3954 3954 assert(!(is_virtual && is_static), "either virtual, special, or static");
3955 3955
3956 3956 enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
3957 3957
3958 3958 RegionNode* result_reg = new(C) RegionNode(PATH_LIMIT);
3959 3959 PhiNode* result_val = new(C) PhiNode(result_reg,
3960 3960 TypeInt::INT);
3961 3961 PhiNode* result_io = new(C) PhiNode(result_reg, Type::ABIO);
3962 3962 PhiNode* result_mem = new(C) PhiNode(result_reg, Type::MEMORY,
3963 3963 TypePtr::BOTTOM);
3964 3964 Node* obj = NULL;
3965 3965 if (!is_static) {
3966 3966 // Check for hashing null object
3967 3967 obj = null_check_receiver();
3968 3968 if (stopped()) return true; // unconditionally null
3969 3969 result_reg->init_req(_null_path, top());
3970 3970 result_val->init_req(_null_path, top());
3971 3971 } else {
3972 3972 // Do a null check, and return zero if null.
3973 3973 // System.identityHashCode(null) == 0
3974 3974 obj = argument(0);
3975 3975 Node* null_ctl = top();
3976 3976 obj = null_check_oop(obj, &null_ctl);
3977 3977 result_reg->init_req(_null_path, null_ctl);
3978 3978 result_val->init_req(_null_path, _gvn.intcon(0));
3979 3979 }
3980 3980
3981 3981 // Unconditionally null? Then return right away.
3982 3982 if (stopped()) {
3983 3983 set_control( result_reg->in(_null_path));
3984 3984 if (!stopped())
3985 3985 set_result(result_val->in(_null_path));
3986 3986 return true;
3987 3987 }
3988 3988
3989 3989 // After null check, get the object's klass.
3990 3990 Node* obj_klass = load_object_klass(obj);
3991 3991
3992 3992 // This call may be virtual (invokevirtual) or bound (invokespecial).
3993 3993 // For each case we generate slightly different code.
3994 3994
3995 3995 // We only go to the fast case code if we pass a number of guards. The
3996 3996 // paths which do not pass are accumulated in the slow_region.
3997 3997 RegionNode* slow_region = new (C) RegionNode(1);
3998 3998 record_for_igvn(slow_region);
3999 3999
4000 4000 // If this is a virtual call, we generate a funny guard. We pull out
4001 4001 // the vtable entry corresponding to hashCode() from the target object.
4002 4002 // If the target method which we are calling happens to be the native
4003 4003 // Object hashCode() method, we pass the guard. We do not need this
4004 4004 // guard for non-virtual calls -- the caller is known to be the native
4005 4005 // Object hashCode().
4006 4006 if (is_virtual) {
4007 4007 generate_virtual_guard(obj_klass, slow_region);
4008 4008 }
4009 4009
4010 4010 // Get the header out of the object, use LoadMarkNode when available
4011 4011 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4012 4012 Node* header = make_load(control(), header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4013 4013
4014 4014 // Test the header to see if it is unlocked.
4015 4015 Node *lock_mask = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place);
4016 4016 Node *lmasked_header = _gvn.transform(new (C) AndXNode(header, lock_mask));
4017 4017 Node *unlocked_val = _gvn.MakeConX(markOopDesc::unlocked_value);
4018 4018 Node *chk_unlocked = _gvn.transform(new (C) CmpXNode( lmasked_header, unlocked_val));
4019 4019 Node *test_unlocked = _gvn.transform(new (C) BoolNode( chk_unlocked, BoolTest::ne));
4020 4020
4021 4021 generate_slow_guard(test_unlocked, slow_region);
4022 4022
4023 4023 // Get the hash value and check to see that it has been properly assigned.
4024 4024 // We depend on hash_mask being at most 32 bits and avoid the use of
4025 4025 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4026 4026 // vm: see markOop.hpp.
4027 4027 Node *hash_mask = _gvn.intcon(markOopDesc::hash_mask);
4028 4028 Node *hash_shift = _gvn.intcon(markOopDesc::hash_shift);
4029 4029 Node *hshifted_header= _gvn.transform(new (C) URShiftXNode(header, hash_shift));
4030 4030 // This hack lets the hash bits live anywhere in the mark word now, as long
4031 4031 // as the shift drops the relevant bits into the low 32 bits. Note that
4032 4032 // the Java spec says that hashCode() returns an int, so there is no point in
4033 4033 // capturing an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
4034 4034 hshifted_header = ConvX2I(hshifted_header);
4035 4035 Node *hash_val = _gvn.transform(new (C) AndINode(hshifted_header, hash_mask));
4036 4036
4037 4037 Node *no_hash_val = _gvn.intcon(markOopDesc::no_hash);
4038 4038 Node *chk_assigned = _gvn.transform(new (C) CmpINode( hash_val, no_hash_val));
4039 4039 Node *test_assigned = _gvn.transform(new (C) BoolNode( chk_assigned, BoolTest::eq));
4040 4040
4041 4041 generate_slow_guard(test_assigned, slow_region);
4042 4042
4043 4043 Node* init_mem = reset_memory();
4044 4044 // fill in the rest of the null path:
4045 4045 result_io ->init_req(_null_path, i_o());
4046 4046 result_mem->init_req(_null_path, init_mem);
4047 4047
4048 4048 result_val->init_req(_fast_path, hash_val);
4049 4049 result_reg->init_req(_fast_path, control());
4050 4050 result_io ->init_req(_fast_path, i_o());
4051 4051 result_mem->init_req(_fast_path, init_mem);
4052 4052
4053 4053 // Generate code for the slow case. We make a call to hashCode().
4054 4054 set_control(_gvn.transform(slow_region));
4055 4055 if (!stopped()) {
4056 4056 // No need for PreserveJVMState, because we're using up the present state.
4057 4057 set_all_memory(init_mem);
4058 4058 vmIntrinsics::ID hashCode_id = is_static ? vmIntrinsics::_identityHashCode : vmIntrinsics::_hashCode;
4059 4059 CallJavaNode* slow_call = generate_method_call(hashCode_id, is_virtual, is_static);
4060 4060 Node* slow_result = set_results_for_java_call(slow_call);
4061 4061 // this->control() comes from set_results_for_java_call
4062 4062 result_reg->init_req(_slow_path, control());
4063 4063 result_val->init_req(_slow_path, slow_result);
4064 4064 result_io ->set_req(_slow_path, i_o());
4065 4065 result_mem ->set_req(_slow_path, reset_memory());
4066 4066 }
4067 4067
4068 4068 // Return the combined state.
4069 4069 set_i_o( _gvn.transform(result_io) );
4070 4070 set_all_memory( _gvn.transform(result_mem));
4071 4071
4072 4072 set_result(result_reg, result_val);
4073 4073 return true;
4074 4074 }
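// Java-level intuition for the fast path (illustrative; assumes the object is
// unlocked and not biased, so the mark-word tests above pass):
//   Object o = new Object();
//   int h1 = o.hashCode();         // typically slow path: hash not yet assigned
//   int h2 = o.hashCode();         // fast path: hash read from the mark word
//   assert h1 == h2;
//   System.identityHashCode(null); // 0, via the _null_path above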
4075 4075
4076 4076 //---------------------------inline_native_getClass----------------------------
4077 4077 // public final native Class<?> java.lang.Object.getClass();
4078 4078 //
4079 4079 // Build special case code for calls to getClass on an object.
4080 4080 bool LibraryCallKit::inline_native_getClass() {
4081 4081 Node* obj = null_check_receiver();
4082 4082 if (stopped()) return true;
4083 4083 set_result(load_mirror_from_klass(load_object_klass(obj)));
4084 4084 return true;
4085 4085 }
4086 4086
4087 4087 //-----------------inline_native_Reflection_getCallerClass---------------------
4088 4088 // public static native Class<?> sun.reflect.Reflection.getCallerClass();
4089 4089 //
4090 4090 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4091 4091 //
4092 4092 // NOTE: This code must perform the same logic as JVM_GetCallerClass
4093 4093 // in that it must skip particular security frames and checks for
4094 4094 // caller sensitive methods.
4095 4095 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4096 4096 #ifndef PRODUCT
4097 4097 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4098 4098 tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4099 4099 }
4100 4100 #endif
4101 4101
4102 4102 if (!jvms()->has_method()) {
4103 4103 #ifndef PRODUCT
4104 4104 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4105 4105 tty->print_cr(" Bailing out because intrinsic was inlined at top level");
4106 4106 }
4107 4107 #endif
4108 4108 return false;
4109 4109 }
4110 4110
4111 4111 // Walk back up the JVM state to find the caller at the required
4112 4112 // depth.
4113 4113 JVMState* caller_jvms = jvms();
4114 4114
4115 4115 // Cf. JVM_GetCallerClass
4116 4116 // NOTE: Start the loop at depth 1 because the current JVM state does
4117 4117 // not include the Reflection.getCallerClass() frame.
4118 4118 for (int n = 1; caller_jvms != NULL; caller_jvms = caller_jvms->caller(), n++) {
4119 4119 ciMethod* m = caller_jvms->method();
4120 4120 switch (n) {
4121 4121 case 0:
4122 4122 fatal("current JVM state does not include the Reflection.getCallerClass frame");
4123 4123 break;
4124 4124 case 1:
4125 4125 // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
4126 4126 if (!m->caller_sensitive()) {
4127 4127 #ifndef PRODUCT
4128 4128 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4129 4129 tty->print_cr(" Bailing out: CallerSensitive annotation expected at frame %d", n);
4130 4130 }
4131 4131 #endif
4132 4132 return false; // bail-out; let JVM_GetCallerClass do the work
4133 4133 }
4134 4134 break;
4135 4135 default:
4136 4136 if (!m->is_ignored_by_security_stack_walk()) {
4137 4137 // We have reached the desired frame; return the holder class.
4138 4138 // Acquire method holder as java.lang.Class and push as constant.
4139 4139 ciInstanceKlass* caller_klass = caller_jvms->method()->holder();
4140 4140 ciInstance* caller_mirror = caller_klass->java_mirror();
4141 4141 set_result(makecon(TypeInstPtr::make(caller_mirror)));
4142 4142
4143 4143 #ifndef PRODUCT
4144 4144 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4145 4145 tty->print_cr(" Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
4146 4146 tty->print_cr(" JVM state at this point:");
4147 4147 for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
4148 4148 ciMethod* m = jvms()->of_depth(i)->method();
4149 4149 tty->print_cr(" %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
4150 4150 }
4151 4151 }
4152 4152 #endif
4153 4153 return true;
4154 4154 }
4155 4155 break;
4156 4156 }
4157 4157 }
4158 4158
4159 4159 #ifndef PRODUCT
4160 4160 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4161 4161 tty->print_cr(" Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
4162 4162 tty->print_cr(" JVM state at this point:");
4163 4163 for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
4164 4164 ciMethod* m = jvms()->of_depth(i)->method();
4165 4165 tty->print_cr(" %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
4166 4166 }
4167 4167 }
4168 4168 #endif
4169 4169
4170 4170 return false; // bail-out; let JVM_GetCallerClass do the work
4171 4171 }
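// Frame-walk sketch for the loop above (illustrative):
//   n = 1:  must be a @CallerSensitive method (e.g., Class.forName),
//           else bail out to JVM_GetCallerClass
//   n >= 2: skip frames ignored by the security stack walk (reflection
//           machinery); the first remaining frame's holder class is the result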
4172 4172
4173 4173 bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
4174 4174 Node* arg = argument(0);
4175 4175 Node* result;
4176 4176
4177 4177 switch (id) {
4178 4178 case vmIntrinsics::_floatToRawIntBits: result = new (C) MoveF2INode(arg); break;
4179 4179 case vmIntrinsics::_intBitsToFloat: result = new (C) MoveI2FNode(arg); break;
4180 4180 case vmIntrinsics::_doubleToRawLongBits: result = new (C) MoveD2LNode(arg); break;
4181 4181 case vmIntrinsics::_longBitsToDouble: result = new (C) MoveL2DNode(arg); break;
4182 4182
4183 4183 case vmIntrinsics::_doubleToLongBits: {
4184 4184 // two paths (plus control) merge in a diamond
4185 4185 RegionNode *r = new (C) RegionNode(3);
4186 4186 Node *phi = new (C) PhiNode(r, TypeLong::LONG);
4187 4187
4188 4188 Node *cmpisnan = _gvn.transform(new (C) CmpDNode(arg, arg));
4189 4189 // Build the boolean node
4190 4190 Node *bolisnan = _gvn.transform(new (C) BoolNode(cmpisnan, BoolTest::ne));
4191 4191
4192 4192 // Branch either way.
4193 4193 // NaN case is less traveled, which makes all the difference.
4194 4194 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
4195 4195 Node *opt_isnan = _gvn.transform(ifisnan);
4196 4196 assert( opt_isnan->is_If(), "Expect an IfNode");
4197 4197 IfNode *opt_ifisnan = (IfNode*)opt_isnan;
4198 4198 Node *iftrue = _gvn.transform(new (C) IfTrueNode(opt_ifisnan));
4199 4199
4200 4200 set_control(iftrue);
4201 4201
4202 4202 static const jlong nan_bits = CONST64(0x7ff8000000000000);
4203 4203 Node *slow_result = longcon(nan_bits); // return NaN
4204 4204 phi->init_req(1, _gvn.transform( slow_result ));
4205 4205 r->init_req(1, iftrue);
4206 4206
4207 4207 // Else fall through
4208 4208 Node *iffalse = _gvn.transform(new (C) IfFalseNode(opt_ifisnan));
4209 4209 set_control(iffalse);
4210 4210
4211 4211 phi->init_req(2, _gvn.transform(new (C) MoveD2LNode(arg)));
4212 4212 r->init_req(2, iffalse);
4213 4213
4214 4214 // Post merge
4215 4215 set_control(_gvn.transform(r));
4216 4216 record_for_igvn(r);
4217 4217
4218 4218 C->set_has_split_ifs(true); // Has chance for split-if optimization
4219 4219 result = phi;
4220 4220 assert(result->bottom_type()->isa_long(), "must be");
4221 4221 break;
4222 4222 }
4223 4223
4224 4224 case vmIntrinsics::_floatToIntBits: {
4225 4225 // two paths (plus control) merge in a diamond
4226 4226 RegionNode *r = new (C) RegionNode(3);
4227 4227 Node *phi = new (C) PhiNode(r, TypeInt::INT);
4228 4228
4229 4229 Node *cmpisnan = _gvn.transform(new (C) CmpFNode(arg, arg));
4230 4230 // Build the boolean node
4231 4231 Node *bolisnan = _gvn.transform(new (C) BoolNode(cmpisnan, BoolTest::ne));
4232 4232
4233 4233 // Branch either way.
4234 4234 // NaN case is less traveled, which makes all the difference.
4235 4235 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
4236 4236 Node *opt_isnan = _gvn.transform(ifisnan);
4237 4237 assert( opt_isnan->is_If(), "Expect an IfNode");
4238 4238 IfNode *opt_ifisnan = (IfNode*)opt_isnan;
4239 4239 Node *iftrue = _gvn.transform(new (C) IfTrueNode(opt_ifisnan));
4240 4240
4241 4241 set_control(iftrue);
4242 4242
4243 4243 static const jint nan_bits = 0x7fc00000;
4244 4244 Node *slow_result = makecon(TypeInt::make(nan_bits)); // return NaN
4245 4245 phi->init_req(1, _gvn.transform( slow_result ));
4246 4246 r->init_req(1, iftrue);
4247 4247
4248 4248 // Else fall through
4249 4249 Node *iffalse = _gvn.transform(new (C) IfFalseNode(opt_ifisnan));
4250 4250 set_control(iffalse);
4251 4251
4252 4252 phi->init_req(2, _gvn.transform(new (C) MoveF2INode(arg)));
4253 4253 r->init_req(2, iffalse);
4254 4254
4255 4255 // Post merge
4256 4256 set_control(_gvn.transform(r));
4257 4257 record_for_igvn(r);
4258 4258
4259 4259 C->set_has_split_ifs(true); // Has chance for split-if optimization
4260 4260 result = phi;
4261 4261 assert(result->bottom_type()->isa_int(), "must be");
4262 4262 break;
4263 4263 }
4264 4264
4265 4265 default:
4266 4266 fatal_unexpected_iid(id);
4267 4267 break;
4268 4268 }
4269 4269 set_result(_gvn.transform(result));
4270 4270 return true;
4271 4271 }
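// Illustrative Java-level behavior (a sketch): the *Raw* conversions move the
// bits unchanged, while the checked ones collapse every NaN to the canonical
// nan_bits pattern used above:
//   int raw = Float.floatToRawIntBits(Float.intBitsToFloat(0x7fc00001));
//   // raw == 0x7fc00001 (bits preserved)
//   int chk = Float.floatToIntBits(Float.intBitsToFloat(0x7fc00001));
//   // chk == 0x7fc00000 (canonical NaN)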
4272 4272
4273 4273 #ifdef _LP64
4274 4274 #define XTOP ,top() /*additional argument*/
4275 4275 #else //_LP64
4276 4276 #define XTOP /*no additional argument*/
4277 4277 #endif //_LP64
4278 4278
4279 4279 //----------------------inline_unsafe_copyMemory-------------------------
4280 4280 // public native void sun.misc.Unsafe.copyMemory(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
4281 4281 bool LibraryCallKit::inline_unsafe_copyMemory() {
4282 4282 if (callee()->is_static()) return false; // caller must have the capability!
4283 4283 null_check_receiver(); // null-check receiver
4284 4284 if (stopped()) return true;
4285 4285
4286 4286 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe".
4287 4287
4288 4288 Node* src_ptr = argument(1); // type: oop
4289 4289 Node* src_off = ConvL2X(argument(2)); // type: long
4290 4290 Node* dst_ptr = argument(4); // type: oop
4291 4291 Node* dst_off = ConvL2X(argument(5)); // type: long
4292 4292 Node* size = ConvL2X(argument(7)); // type: long
4293 4293
4294 4294 assert(Unsafe_field_offset_to_byte_offset(11) == 11,
4295 4295 "fieldOffset must be byte-scaled");
4296 4296
4297 4297 Node* src = make_unsafe_address(src_ptr, src_off);
4298 4298 Node* dst = make_unsafe_address(dst_ptr, dst_off);
4299 4299
4300 4300 // Conservatively insert a memory barrier on all memory slices.
4301 4301 // Do not let writes of the copy source or destination float below the copy.
4302 4302 insert_mem_bar(Op_MemBarCPUOrder);
4303 4303
4304 4304 // Call it. Note that the length argument is not scaled.
4305 4305 make_runtime_call(RC_LEAF|RC_NO_FP,
4306 4306 OptoRuntime::fast_arraycopy_Type(),
4307 4307 StubRoutines::unsafe_arraycopy(),
4308 4308 "unsafe_arraycopy",
4309 4309 TypeRawPtr::BOTTOM,
4310 4310 src, dst, size XTOP);
4311 4311
4312 4312 // Do not let reads of the copy destination float above the copy.
4313 4313 insert_mem_bar(Op_MemBarCPUOrder);
4314 4314
4315 4315 return true;
4316 4316 }
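// Usage sketch at the Java level (illustrative; Unsafe is JDK-internal and
// 'unsafe' here is an already-obtained instance, not shown):
//   long src = unsafe.allocateMemory(len);
//   long dst = unsafe.allocateMemory(len);
//   unsafe.copyMemory(null, src, null, dst, len); // null bases => raw addresses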
4317 4317
4318 4318 //------------------------copy_to_clone----------------------------------
4319 4319 // Helper function for inline_native_clone.
4320 4320 void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark) {
4321 4321 assert(obj_size != NULL, "");
4322 4322 Node* raw_obj = alloc_obj->in(1);
4323 4323 assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
4324 4324
4325 4325 AllocateNode* alloc = NULL;
4326 4326 if (ReduceBulkZeroing) {
4327 4327 // We will be completely responsible for initializing this object -
4328 4328 // mark Initialize node as complete.
4329 4329 alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
4330 4330 // The object was just allocated - there should not be any stores!
4331 4331 guarantee(alloc != NULL && alloc->maybe_set_complete(&_gvn), "");
4332 4332 // Mark as complete_with_arraycopy so that on AllocateNode
4333 4333 // expansion, we know this AllocateNode is initialized by an array
4334 4334 // copy and a StoreStore barrier exists after the array copy.
4335 4335 alloc->initialization()->set_complete_with_arraycopy();
4336 4336 }
4337 4337
4338 4338 // Copy the fastest available way.
4339 4339 // TODO: generate fields copies for small objects instead.
4340 4340 Node* src = obj;
4341 4341 Node* dest = alloc_obj;
4342 4342 Node* size = _gvn.transform(obj_size);
4343 4343
4344 4344 // Exclude the header but include the array length, to copy in 8-byte words.
4345 4345 // Can't use base_offset_in_bytes(bt) since basic type is unknown.
4346 4346 int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
4347 4347 instanceOopDesc::base_offset_in_bytes();
4348 4348 // base_off:
4349 4349 // 8 - 32-bit VM
4350 4350 // 12 - 64-bit VM, compressed klass
4351 4351 // 16 - 64-bit VM, normal klass
4352 4352 if (base_off % BytesPerLong != 0) {
4353 4353 assert(UseCompressedClassPointers, "");
4354 4354 if (is_array) {
4355 4355 // Exclude the length, to copy in 8-byte words.
4356 4356 base_off += sizeof(int);
4357 4357 } else {
4358 4358 // Include the klass, to copy in 8-byte words.
4359 4359 base_off = instanceOopDesc::klass_offset_in_bytes();
4360 4360 }
4361 4361 assert(base_off % BytesPerLong == 0, "expect 8-byte alignment");
4362 4362 }
4363 4363 src = basic_plus_adr(src, base_off);
4364 4364 dest = basic_plus_adr(dest, base_off);
4365 4365
4366 4366 // Compute the length also, if needed:
4367 4367 Node* countx = size;
4368 4368 countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(base_off)));
4369 4369 countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong) ));
4370 4370
4371 4371 const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4372 4372 bool disjoint_bases = true;
4373 4373 generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
4374 4374 src, NULL, dest, NULL, countx,
4375 4375 /*dest_uninitialized*/true);
4376 4376
4377 4377 // If necessary, emit some card marks afterwards. (Non-arrays only.)
4378 4378 if (card_mark) {
4379 4379 assert(!is_array, "");
4380 4380 // Put in store barrier for any and all oops we are sticking
4381 4381 // into this object. (We could avoid this if we could prove
4382 4382 // that the object type contains no oop fields at all.)
4383 4383 Node* no_particular_value = NULL;
4384 4384 Node* no_particular_field = NULL;
4385 4385 int raw_adr_idx = Compile::AliasIdxRaw;
4386 4386 post_barrier(control(),
4387 4387 memory(raw_adr_type),
4388 4388 alloc_obj,
4389 4389 no_particular_field,
4390 4390 raw_adr_idx,
4391 4391 no_particular_value,
4392 4392 T_OBJECT,
4393 4393 false);
4394 4394 }
4395 4395
4396 4396 // Do not let reads from the cloned object float above the arraycopy.
4397 4397 if (alloc != NULL) {
4398 4398 // Do not let stores that initialize this object be reordered with
4399 4399 // a subsequent store that would make this object accessible by
4400 4400 // other threads.
4401 4401 // Record what AllocateNode this StoreStore protects so that
4402 4402 // escape analysis can go from the MemBarStoreStoreNode to the
4403 4403 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
4404 4404 // based on the escape status of the AllocateNode.
4405 4405 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
4406 4406 } else {
4407 4407 insert_mem_bar(Op_MemBarCPUOrder);
4408 4408 }
4409 4409 }
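// Worked example of the size computation above (64-bit VM, compressed klass,
// so base_off starts at 12 and is not 8-byte aligned):
//   array:    base_off = 12 -> skip the length field  -> base_off = 16
//   instance: base_off = 12 -> rewind to the klass field -> base_off = 8
//   countx = (obj_size - base_off) >> LogBytesPerLong // number of 8-byte words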
4410 4410
4411 4411 //------------------------inline_native_clone----------------------------
4412 4412 // protected native Object java.lang.Object.clone();
4413 4413 //
4414 4414 // Here are the simple edge cases:
4415 4415 // null receiver => normal trap
4416 4416 // virtual and clone was overridden => slow path to out-of-line clone
4417 4417 // not cloneable or finalizer => slow path to out-of-line Object.clone
4418 4418 //
4419 4419 // The general case has two steps, allocation and copying.
4420 4420 // Allocation has two cases, and uses GraphKit::new_instance or new_array.
4421 4421 //
4422 4422 // Copying also has two cases, oop arrays and everything else.
4423 4423 // Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
4424 4424 // Everything else uses the tight inline loop supplied by CopyArrayNode.
4425 4425 //
4426 4426 // These steps fold up nicely if and when the cloned object's klass
4427 4427 // can be sharply typed as an object array, a type array, or an instance.
4428 4428 //
4429 4429 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
4430 4430 PhiNode* result_val;
4431 4431
4432 4432 // Set the reexecute bit for the interpreter to reexecute
4433 4433 // the bytecode that invokes Object.clone if deoptimization happens.
4434 4434 { PreserveReexecuteState preexecs(this);
4435 4435 jvms()->set_should_reexecute(true);
4436 4436
4437 4437 Node* obj = null_check_receiver();
4438 4438 if (stopped()) return true;
4439 4439
4440 4440 Node* obj_klass = load_object_klass(obj);
4441 4441 const TypeKlassPtr* tklass = _gvn.type(obj_klass)->isa_klassptr();
4442 4442 const TypeOopPtr* toop = ((tklass != NULL)
4443 4443 ? tklass->as_instance_type()
4444 4444 : TypeInstPtr::NOTNULL);
4445 4445
4446 4446 // Conservatively insert a memory barrier on all memory slices.
4447 4447 // Do not let writes into the original float below the clone.
4448 4448 insert_mem_bar(Op_MemBarCPUOrder);
4449 4449
4450 4450 // paths into result_reg:
4451 4451 enum {
4452 4452 _slow_path = 1, // out-of-line call to clone method (virtual or not)
4453 4453 _objArray_path, // plain array allocation, plus arrayof_oop_arraycopy
4454 4454 _array_path, // plain array allocation, plus arrayof_long_arraycopy
4455 4455 _instance_path, // plain instance allocation, plus arrayof_long_arraycopy
4456 4456 PATH_LIMIT
4457 4457 };
4458 4458 RegionNode* result_reg = new(C) RegionNode(PATH_LIMIT);
4459 4459 result_val = new(C) PhiNode(result_reg,
4460 4460 TypeInstPtr::NOTNULL);
4461 4461 PhiNode* result_i_o = new(C) PhiNode(result_reg, Type::ABIO);
4462 4462 PhiNode* result_mem = new(C) PhiNode(result_reg, Type::MEMORY,
4463 4463 TypePtr::BOTTOM);
4464 4464 record_for_igvn(result_reg);
4465 4465
4466 4466 const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4467 4467 int raw_adr_idx = Compile::AliasIdxRaw;
4468 4468
4469 4469 Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
4470 4470 if (array_ctl != NULL) {
4471 4471 // It's an array.
4472 4472 PreserveJVMState pjvms(this);
4473 4473 set_control(array_ctl);
4474 4474 Node* obj_length = load_array_length(obj);
4475 4475 Node* obj_size = NULL;
4476 4476 Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size); // no arguments to push
4477 4477
4478 4478 if (!use_ReduceInitialCardMarks()) {
4479 4479 // If it is an oop array, it requires very special treatment,
4480 4480 // because card marking is required on each card of the array.
4481 4481 Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
4482 4482 if (is_obja != NULL) {
4483 4483 PreserveJVMState pjvms2(this);
4484 4484 set_control(is_obja);
4485 4485 // Generate a direct call to the right arraycopy function(s).
4486 4486 bool disjoint_bases = true;
4487 4487 bool length_never_negative = true;
4488 4488 generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
4489 4489 obj, intcon(0), alloc_obj, intcon(0),
4490 4490 obj_length,
4491 4491 disjoint_bases, length_never_negative);
4492 4492 result_reg->init_req(_objArray_path, control());
4493 4493 result_val->init_req(_objArray_path, alloc_obj);
4494 4494 result_i_o ->set_req(_objArray_path, i_o());
4495 4495 result_mem ->set_req(_objArray_path, reset_memory());
4496 4496 }
4497 4497 }
4498 4498 // Otherwise, there are no card marks to worry about.
4499 4499 // (We can dispense with card marks if we know the allocation
4500 4500 // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks
4501 4501 // causes the non-eden paths to take compensating steps to
4502 4502 // simulate a fresh allocation, so that no further
4503 4503 // card marks are required in compiled code to initialize
4504 4504 // the object.)
4505 4505
4506 4506 if (!stopped()) {
4507 4507 copy_to_clone(obj, alloc_obj, obj_size, true, false);
4508 4508
4509 4509 // Present the results of the copy.
4510 4510 result_reg->init_req(_array_path, control());
4511 4511 result_val->init_req(_array_path, alloc_obj);
4512 4512 result_i_o ->set_req(_array_path, i_o());
4513 4513 result_mem ->set_req(_array_path, reset_memory());
4514 4514 }
4515 4515 }
4516 4516
4517 4517 // We only go to the instance fast case code if we pass a number of guards.
4518 4518 // The paths which do not pass are accumulated in the slow_region.
4519 4519 RegionNode* slow_region = new (C) RegionNode(1);
4520 4520 record_for_igvn(slow_region);
4521 4521 if (!stopped()) {
4522 4522 // It's an instance (we did array above). Make the slow-path tests.
4523 4523 // If this is a virtual call, we generate a funny guard. We grab
4524 4524 // the vtable entry corresponding to clone() from the target object.
4525 4525 // If the target method which we are calling happens to be the
4526 4526 // Object clone() method, we pass the guard. We do not need this
4527 4527 // guard for non-virtual calls; the caller is known to be the native
4528 4528 // Object clone().
4529 4529 if (is_virtual) {
4530 4530 generate_virtual_guard(obj_klass, slow_region);
4531 4531 }
4532 4532
4533 4533 // The object must be cloneable and must not have a finalizer.
4534 4534 // Both of these conditions may be checked in a single test.
4535 4535 // We could optimize the cloneable test further, but we don't care.
4536 4536 generate_access_flags_guard(obj_klass,
4537 4537 // Test both conditions:
4538 4538 JVM_ACC_IS_CLONEABLE | JVM_ACC_HAS_FINALIZER,
4539 4539 // Must be cloneable but not finalizer:
4540 4540 JVM_ACC_IS_CLONEABLE,
4541 4541 slow_region);
4542 4542 }
4543 4543
4544 4544 if (!stopped()) {
4545 4545 // It's an instance, and it passed the slow-path tests.
4546 4546 PreserveJVMState pjvms(this);
4547 4547 Node* obj_size = NULL;
4548 4548 Node* alloc_obj = new_instance(obj_klass, NULL, &obj_size);
4549 4549
4550 4550 copy_to_clone(obj, alloc_obj, obj_size, false, !use_ReduceInitialCardMarks());
4551 4551
4552 4552 // Present the results of the slow call.
4553 4553 result_reg->init_req(_instance_path, control());
4554 4554 result_val->init_req(_instance_path, alloc_obj);
4555 4555 result_i_o ->set_req(_instance_path, i_o());
4556 4556 result_mem ->set_req(_instance_path, reset_memory());
4557 4557 }
4558 4558
4559 4559 // Generate code for the slow case. We make a call to clone().
4560 4560 set_control(_gvn.transform(slow_region));
4561 4561 if (!stopped()) {
4562 4562 PreserveJVMState pjvms(this);
4563 4563 CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_clone, is_virtual);
4564 4564 Node* slow_result = set_results_for_java_call(slow_call);
4565 4565 // this->control() comes from set_results_for_java_call
4566 4566 result_reg->init_req(_slow_path, control());
4567 4567 result_val->init_req(_slow_path, slow_result);
4568 4568 result_i_o ->set_req(_slow_path, i_o());
4569 4569 result_mem ->set_req(_slow_path, reset_memory());
4570 4570 }
4571 4571
4572 4572 // Return the combined state.
4573 4573 set_control( _gvn.transform(result_reg));
4574 4574 set_i_o( _gvn.transform(result_i_o));
4575 4575 set_all_memory( _gvn.transform(result_mem));
4576 4576 } // original reexecute is set back here
4577 4577
4578 4578 set_result(_gvn.transform(result_val));
4579 4579 return true;
4580 4580 }
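// Java-level mapping of the result paths (illustrative; receiver names are
// placeholders):
//   intArray.clone()     // _array_path: new_array plus a 64-bit block copy
//   objArray.clone()     // _objArray_path when per-card marking is required
//   cloneable.clone()    // _instance_path once the guards pass
//   nonCloneable.clone() // _slow_path: out-of-line Object.clone throws
//                        // CloneNotSupportedException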
4581 4581
4582 4582 //------------------------------basictype2arraycopy----------------------------
4583 4583 address LibraryCallKit::basictype2arraycopy(BasicType t,
4584 4584 Node* src_offset,
4585 4585 Node* dest_offset,
4586 4586 bool disjoint_bases,
4587 4587 const char* &name,
4588 4588 bool dest_uninitialized) {
4589 4589 const TypeInt* src_offset_inttype = gvn().find_int_type(src_offset);
4590 4590 const TypeInt* dest_offset_inttype = gvn().find_int_type(dest_offset);
4591 4591
4592 4592 bool aligned = false;
4593 4593 bool disjoint = disjoint_bases;
4594 4594
4595 4595 // if the offsets are the same, we can treat the memory regions as
4596 4596 // disjoint, because either the memory regions are in different arrays,
4597 4597 // or they are identical (which we can treat as disjoint.) We can also
4598 4598 // treat a copy with a destination index less than the source index
4599 4599 // as disjoint since a low->high copy will work correctly in this case.
4600 4600 if (src_offset_inttype != NULL && src_offset_inttype->is_con() &&
4601 4601 dest_offset_inttype != NULL && dest_offset_inttype->is_con()) {
4602 4602 // both indices are constants
4603 4603 int s_offs = src_offset_inttype->get_con();
4604 4604 int d_offs = dest_offset_inttype->get_con();
4605 4605 int element_size = type2aelembytes(t);
4606 4606 aligned = ((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
4607 4607 ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0);
4608 4608 if (s_offs >= d_offs) disjoint = true;
4609 4609 } else if (src_offset == dest_offset && src_offset != NULL) {
4610 4610 // This can occur if the offsets are identical non-constants.
4611 4611 disjoint = true;
4612 4612 }
4613 4613
4614 4614 return StubRoutines::select_arraycopy_function(t, aligned, disjoint, name, dest_uninitialized);
4615 4615 }
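// Disjointness examples for the constant-offset test above (illustrative):
//   s_offs == d_offs -> different arrays, or an identical copy: disjoint
//   s_offs > d_offs  -> a low->high copy is safe: treated as disjoint
//   s_offs < d_offs  -> may overlap backwards: a conjoint stub is chosen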
4616 4616
4617 4617
4618 4618 //------------------------------inline_arraycopy-----------------------
4619 4619 // public static native void java.lang.System.arraycopy(Object src, int srcPos,
4620 4620 // Object dest, int destPos,
4621 4621 // int length);
4622 4622 bool LibraryCallKit::inline_arraycopy() {
4623 4623 // Get the arguments.
4624 4624 Node* src = argument(0); // type: oop
4625 4625 Node* src_offset = argument(1); // type: int
4626 4626 Node* dest = argument(2); // type: oop
4627 4627 Node* dest_offset = argument(3); // type: int
4628 4628 Node* length = argument(4); // type: int
4629 4629
4630 4630 // Compile time checks. If any of these checks cannot be verified at compile time,
4631 4631 // we do not make a fast path for this call. Instead, we let the call remain as it
4632 4632 // is. The checks we choose to mandate at compile time are:
4633 4633 //
4634 4634 // (1) src and dest are arrays.
4635 4635 const Type* src_type = src->Value(&_gvn);
4636 4636 const Type* dest_type = dest->Value(&_gvn);
4637 4637 const TypeAryPtr* top_src = src_type->isa_aryptr();
4638 4638 const TypeAryPtr* top_dest = dest_type->isa_aryptr();
4639 4639
4640 4640 // Do we have the type of src?
4641 4641 bool has_src = (top_src != NULL && top_src->klass() != NULL);
4642 4642 // Do we have the type of dest?
4643 4643 bool has_dest = (top_dest != NULL && top_dest->klass() != NULL);
4644 4644 // Is the type for src from speculation?
4645 4645 bool src_spec = false;
4646 4646 // Is the type for dest from speculation?
4647 4647 bool dest_spec = false;
4648 4648
4649 4649 if (!has_src || !has_dest) {
4650 4650 // We don't have sufficient type information, let's see if
4651 4651 // speculative types can help. We need to have types for both src
4652 4652 // and dest so that it pays off.
4653 4653
4654 4654 // Do we already have or could we have type information for src
4655 4655 bool could_have_src = has_src;
4656 4656 // Do we already have or could we have type information for dest
4657 4657 bool could_have_dest = has_dest;
4658 4658
4659 4659 ciKlass* src_k = NULL;
4660 4660 if (!has_src) {
4661 4661 src_k = src_type->speculative_type();
4662 4662 if (src_k != NULL && src_k->is_array_klass()) {
4663 4663 could_have_src = true;
4664 4664 }
4665 4665 }
4666 4666
4667 4667 ciKlass* dest_k = NULL;
4668 4668 if (!has_dest) {
4669 4669 dest_k = dest_type->speculative_type();
4670 4670 if (dest_k != NULL && dest_k->is_array_klass()) {
4671 4671 could_have_dest = true;
4672 4672 }
4673 4673 }
4674 4674
4675 4675 if (could_have_src && could_have_dest) {
4676 4676 // This is going to pay off so emit the required guards
4677 4677 if (!has_src) {
4678 4678 src = maybe_cast_profiled_obj(src, src_k);
4679 4679 src_type = _gvn.type(src);
4680 4680 top_src = src_type->isa_aryptr();
4681 4681 has_src = (top_src != NULL && top_src->klass() != NULL);
4682 4682 src_spec = true;
4683 4683 }
4684 4684 if (!has_dest) {
4685 4685 dest = maybe_cast_profiled_obj(dest, dest_k);
4686 4686 dest_type = _gvn.type(dest);
4687 4687 top_dest = dest_type->isa_aryptr();
4688 4688 has_dest = (top_dest != NULL && top_dest->klass() != NULL);
4689 4689 dest_spec = true;
4690 4690 }
4691 4691 }
4692 4692 }
4693 4693
4694 4694 if (!has_src || !has_dest) {
4695 4695 // Conservatively insert a memory barrier on all memory slices.
4696 4696 // Do not let writes into the source float below the arraycopy.
4697 4697 insert_mem_bar(Op_MemBarCPUOrder);
4698 4698
4699 4699 // Call StubRoutines::generic_arraycopy stub.
4700 4700 generate_arraycopy(TypeRawPtr::BOTTOM, T_CONFLICT,
4701 4701 src, src_offset, dest, dest_offset, length);
4702 4702
4703 4703 // Do not let reads from the destination float above the arraycopy.
4704 4704 // Since we cannot type the arrays, we don't know which slices
4705 4705 // might be affected. We could restrict this barrier only to those
4706 4706 // memory slices which pertain to array elements--but don't bother.
4707 4707 if (!InsertMemBarAfterArraycopy)
4708 4708 // (If InsertMemBarAfterArraycopy, there is already one in place.)
4709 4709 insert_mem_bar(Op_MemBarCPUOrder);
4710 4710 return true;
4711 4711 }
4712 4712
4713 4713 // (2) src and dest arrays must have elements of the same BasicType
4714 4714 // Figure out the size and type of the elements we will be copying.
4715 4715 BasicType src_elem = top_src->klass()->as_array_klass()->element_type()->basic_type();
4716 4716 BasicType dest_elem = top_dest->klass()->as_array_klass()->element_type()->basic_type();
4717 4717 if (src_elem == T_ARRAY) src_elem = T_OBJECT;
4718 4718 if (dest_elem == T_ARRAY) dest_elem = T_OBJECT;
4719 4719
4720 4720 if (src_elem != dest_elem || dest_elem == T_VOID) {
4721 4721 // The component types are not the same or are not recognized. Punt.
4722 4722 // (But, avoid the native method wrapper to JVM_ArrayCopy.)
4723 4723 generate_slow_arraycopy(TypePtr::BOTTOM,
4724 4724 src, src_offset, dest, dest_offset, length,
4725 4725 /*dest_uninitialized*/false);
4726 4726 return true;
4727 4727 }
4728 4728
4729 4729 if (src_elem == T_OBJECT) {
4730 4730 // If both arrays are object arrays then having the exact types
4731 4731 // for both will remove the need for a subtype check at runtime
4732 4732 // before the call and may make it possible to pick a faster copy
4733 4733 // routine (without a subtype check on every element)
4734 4734 // Do we have the exact type of src?
4735 4735 bool could_have_src = src_spec;
4736 4736 // Do we have the exact type of dest?
4737 4737 bool could_have_dest = dest_spec;
4738 4738 ciKlass* src_k = top_src->klass();
4739 4739 ciKlass* dest_k = top_dest->klass();
4740 4740 if (!src_spec) {
4741 4741 src_k = src_type->speculative_type();
4742 4742 if (src_k != NULL && src_k->is_array_klass()) {
4743 4743 could_have_src = true;
4744 4744 }
4745 4745 }
4746 4746 if (!dest_spec) {
4747 4747 dest_k = dest_type->speculative_type();
4748 4748 if (dest_k != NULL && dest_k->is_array_klass()) {
4749 4749 could_have_dest = true;
4750 4750 }
4751 4751 }
4752 4752 if (could_have_src && could_have_dest) {
4753 4753 // If we can have both exact types, emit the missing guards
4754 4754 if (could_have_src && !src_spec) {
4755 4755 src = maybe_cast_profiled_obj(src, src_k);
4756 4756 }
4757 4757 if (could_have_dest && !dest_spec) {
4758 4758 dest = maybe_cast_profiled_obj(dest, dest_k);
4759 4759 }
4760 4760 }
4761 4761 }
4762 4762
4763 4763 //---------------------------------------------------------------------------
4764 4764 // We will make a fast path for this call to arraycopy.
4765 4765
4766 4766 // We have the following tests left to perform:
4767 4767 //
4768 4768 // (3) src and dest must not be null.
4769 4769 // (4) src_offset must not be negative.
4770 4770 // (5) dest_offset must not be negative.
4771 4771 // (6) length must not be negative.
4772 4772 // (7) src_offset + length must not exceed length of src.
4773 4773 // (8) dest_offset + length must not exceed length of dest.
4774 4774 // (9) each element of an oop array must be assignable
4775 4775
4776 4776 RegionNode* slow_region = new (C) RegionNode(1);
4777 4777 record_for_igvn(slow_region);
4778 4778
4779 4779 // (3) operands must not be null
4780 4780 // We currently perform our null checks with the null_check routine.
4781 4781 // This means that the null exceptions will be reported in the caller
4782 4782 // rather than (correctly) reported inside of the native arraycopy call.
4783 4783 // This should be corrected, given time. We do our null check with the
4784 4784 // stack pointer restored.
4785 4785 src = null_check(src, T_ARRAY);
4786 4786 dest = null_check(dest, T_ARRAY);
4787 4787
4788 4788 // (4) src_offset must not be negative.
4789 4789 generate_negative_guard(src_offset, slow_region);
4790 4790
4791 4791 // (5) dest_offset must not be negative.
4792 4792 generate_negative_guard(dest_offset, slow_region);
4793 4793
4794 4794 // (6) length must not be negative (moved to generate_arraycopy()).
4795 4795 // generate_negative_guard(length, slow_region);
4796 4796
4797 4797 // (7) src_offset + length must not exceed length of src.
4798 4798 generate_limit_guard(src_offset, length,
4799 4799 load_array_length(src),
4800 4800 slow_region);
4801 4801
4802 4802 // (8) dest_offset + length must not exceed length of dest.
4803 4803 generate_limit_guard(dest_offset, length,
4804 4804 load_array_length(dest),
4805 4805 slow_region);
4806 4806
4807 4807 // (9) each element of an oop array must be assignable
4808 4808 // The generate_arraycopy subroutine checks this.
4809 4809
4810 4810 // This is where the memory effects are placed:
4811 4811 const TypePtr* adr_type = TypeAryPtr::get_array_body_type(dest_elem);
4812 4812 generate_arraycopy(adr_type, dest_elem,
4813 4813 src, src_offset, dest, dest_offset, length,
4814 4814 false, false, slow_region);
4815 4815
4816 4816 return true;
4817 4817 }
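// Java-level effect of checks (3)..(9) (illustrative; src/dst/objArr/strArr
// are placeholder arrays):
//   System.arraycopy(null, 0, dst, 0, 1);           // (3) NullPointerException
//   System.arraycopy(src, -1, dst, 0, 1);           // (4) IndexOutOfBoundsException
//   System.arraycopy(src, 0, dst, 0, src.length+1); // (7) IndexOutOfBoundsException
//   System.arraycopy(objArr, 0, strArr, 0, n);      // (9) ArrayStoreException on the
//                                                   //     first non-assignable element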
4818 4818
4819 4819 //-----------------------------generate_arraycopy----------------------
4820 4820 // Generate an optimized call to arraycopy.
4821 4821 // Caller must guard against non-arrays.
4822 4822 // Caller must determine a common array basic-type for both arrays.
4823 4823 // Caller must validate offsets against array bounds.
4824 4824 // The slow_region has already collected guard failure paths
4825 4825 // (such as out of bounds length or non-conformable array types).
4826 4826 // The generated code has this shape, in general:
4827 4827 //
4828 4828 // if (length == 0) return // via zero_path
4829 4829 // slowval = -1
4830 4830 // if (types unknown) {
4831 4831 // slowval = call generic copy loop
4832 4832 // if (slowval == 0) return // via checked_path
4833 4833 // } else if (indexes in bounds) {
4834 4834 // if ((is object array) && !(array type check)) {
4835 4835 // slowval = call checked copy loop
4836 4836 // if (slowval == 0) return // via checked_path
4837 4837 // } else {
4838 4838 // call bulk copy loop
4839 4839 // return // via fast_path
4840 4840 // }
4841 4841 // }
4842 4842 // // adjust params for remaining work:
4843 4843 // if (slowval != -1) {
4844 4844 // n = -1^slowval; src_offset += n; dest_offset += n; length -= n
4845 4845 // }
4846 4846 // slow_region:
4847 4847 // call slow arraycopy(src, src_offset, dest, dest_offset, length)
4848 4848 // return // via slow_call_path
4849 4849 //
4850 4850 // This routine is used from several intrinsics: System.arraycopy,
4851 4851 // Object.clone (the array subcase), and Arrays.copyOf[Range].
4852 4852 //
4853 4853 void
4854 4854 LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
4855 4855 BasicType basic_elem_type,
4856 4856 Node* src, Node* src_offset,
4857 4857 Node* dest, Node* dest_offset,
4858 4858 Node* copy_length,
4859 4859 bool disjoint_bases,
4860 4860 bool length_never_negative,
4861 4861 RegionNode* slow_region) {
4862 4862
4863 4863 if (slow_region == NULL) {
4864 4864 slow_region = new(C) RegionNode(1);
4865 4865 record_for_igvn(slow_region);
4866 4866 }
4867 4867
4868 4868 Node* original_dest = dest;
4869 4869 AllocateArrayNode* alloc = NULL; // used for zeroing, if needed
4870 4870 bool dest_uninitialized = false;
4871 4871
4872 4872 // See if this is the initialization of a newly-allocated array.
4873 4873 // If so, we will take responsibility here for initializing it to zero.
4874 4874 // (Note: Because tightly_coupled_allocation performs checks on the
4875 4875 // out-edges of the dest, we need to avoid making derived pointers
4876 4876 // from it until we have checked its uses.)
4877 4877 if (ReduceBulkZeroing
4878 4878 && !ZeroTLAB // pointless if already zeroed
4879 4879 && basic_elem_type != T_CONFLICT // avoid corner case
4880 4880 && !src->eqv_uncast(dest)
4881 4881 && ((alloc = tightly_coupled_allocation(dest, slow_region))
4882 4882 != NULL)
4883 4883 && _gvn.find_int_con(alloc->in(AllocateNode::ALength), 1) > 0
4884 4884 && alloc->maybe_set_complete(&_gvn)) {
4885 4885 // "You break it, you buy it."
4886 4886 InitializeNode* init = alloc->initialization();
4887 4887 assert(init->is_complete(), "we just did this");
4888 4888 init->set_complete_with_arraycopy();
4889 4889 assert(dest->is_CheckCastPP(), "sanity");
4890 4890 assert(dest->in(0)->in(0) == init, "dest pinned");
4891 4891 adr_type = TypeRawPtr::BOTTOM; // all initializations are into raw memory
4892 4892 // From this point on, every exit path is responsible for
4893 4893 // initializing any non-copied parts of the object to zero.
4894 4894 // Also, if this flag is set we make sure that arraycopy interacts properly
4895 4895 // with G1, eliding pre-barriers. See CR 6627983.
4896 4896 dest_uninitialized = true;
4897 4897 } else {
4898 4898 // No zeroing elimination here.
4899 4899 alloc = NULL;
4900 4900 //original_dest = dest;
4901 4901 //dest_uninitialized = false;
4902 4902 }
4903 4903
4904 4904 // Results are placed here:
4905 4905 enum { fast_path = 1, // normal void-returning assembly stub
4906 4906 checked_path = 2, // special assembly stub with cleanup
4907 4907 slow_call_path = 3, // something went wrong; call the VM
4908 4908 zero_path = 4, // bypass when length of copy is zero
4909 4909 bcopy_path = 5, // copy primitive array by 64-bit blocks
4910 4910 PATH_LIMIT = 6
4911 4911 };
4912 4912 RegionNode* result_region = new(C) RegionNode(PATH_LIMIT);
4913 4913 PhiNode* result_i_o = new(C) PhiNode(result_region, Type::ABIO);
4914 4914 PhiNode* result_memory = new(C) PhiNode(result_region, Type::MEMORY, adr_type);
4915 4915 record_for_igvn(result_region);
4916 4916 _gvn.set_type_bottom(result_i_o);
4917 4917 _gvn.set_type_bottom(result_memory);
4918 4918 assert(adr_type != TypePtr::BOTTOM, "must be RawMem or a T[] slice");
4919 4919
4920 4920 // The slow_control path:
4921 4921 Node* slow_control;
4922 4922 Node* slow_i_o = i_o();
4923 4923 Node* slow_mem = memory(adr_type);
4924 4924 debug_only(slow_control = (Node*) badAddress);
4925 4925
4926 4926 // Checked control path:
4927 4927 Node* checked_control = top();
4928 4928 Node* checked_mem = NULL;
4929 4929 Node* checked_i_o = NULL;
4930 4930 Node* checked_value = NULL;
4931 4931
4932 4932 if (basic_elem_type == T_CONFLICT) {
4933 4933 assert(!dest_uninitialized, "");
4934 4934 Node* cv = generate_generic_arraycopy(adr_type,
4935 4935 src, src_offset, dest, dest_offset,
4936 4936 copy_length, dest_uninitialized);
4937 4937 if (cv == NULL) cv = intcon(-1); // failure (no stub available)
4938 4938 checked_control = control();
4939 4939 checked_i_o = i_o();
4940 4940 checked_mem = memory(adr_type);
4941 4941 checked_value = cv;
4942 4942 set_control(top()); // no fast path
4943 4943 }
4944 4944
4945 4945 Node* not_pos = generate_nonpositive_guard(copy_length, length_never_negative);
4946 4946 if (not_pos != NULL) {
4947 4947 PreserveJVMState pjvms(this);
4948 4948 set_control(not_pos);
4949 4949
4950 4950 // (6) length must not be negative.
4951 4951 if (!length_never_negative) {
4952 4952 generate_negative_guard(copy_length, slow_region);
4953 4953 }
4954 4954
4955 4955 // copy_length is 0.
4956 4956 if (!stopped() && dest_uninitialized) {
4957 4957 Node* dest_length = alloc->in(AllocateNode::ALength);
4958 4958 if (copy_length->eqv_uncast(dest_length)
4959 4959 || _gvn.find_int_con(dest_length, 1) <= 0) {
4960 4960 // There is no zeroing to do. No need for a secondary raw memory barrier.
4961 4961 } else {
4962 4962 // Clear the whole thing since there are no source elements to copy.
4963 4963 generate_clear_array(adr_type, dest, basic_elem_type,
4964 4964 intcon(0), NULL,
4965 4965 alloc->in(AllocateNode::AllocSize));
4966 4966 // Use a secondary InitializeNode as raw memory barrier.
4967 4967 // Currently it is needed only on this path since other
4968 4968 // paths have stub or runtime calls as raw memory barriers.
4969 4969 InitializeNode* init = insert_mem_bar_volatile(Op_Initialize,
4970 4970 Compile::AliasIdxRaw,
4971 4971 top())->as_Initialize();
4972 4972 init->set_complete(&_gvn); // (there is no corresponding AllocateNode)
4973 4973 }
4974 4974 }
4975 4975
4976 4976 // Present the results of the fast call.
4977 4977 result_region->init_req(zero_path, control());
4978 4978 result_i_o ->init_req(zero_path, i_o());
4979 4979 result_memory->init_req(zero_path, memory(adr_type));
4980 4980 }
4981 4981
4982 4982 if (!stopped() && dest_uninitialized) {
4983 4983 // We have to initialize the *uncopied* part of the array to zero.
4984 4984 // The copy destination is the slice dest[off..off+len]. The other slices
4985 4985 // are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length].
4986 4986 Node* dest_size = alloc->in(AllocateNode::AllocSize);
4987 4987 Node* dest_length = alloc->in(AllocateNode::ALength);
4988 4988 Node* dest_tail = _gvn.transform(new(C) AddINode(dest_offset,
4989 4989 copy_length));
4990 4990
4991 4991 // If there is a head section that needs zeroing, do it now.
4992 4992 if (find_int_con(dest_offset, -1) != 0) {
4993 4993 generate_clear_array(adr_type, dest, basic_elem_type,
4994 4994 intcon(0), dest_offset,
4995 4995 NULL);
4996 4996 }
4997 4997
4998 4998 // Next, perform a dynamic check on the tail length.
4999 4999 // It is often zero, and we can win big if we prove this.
5000 5000 // There are two wins: Avoid generating the ClearArray
5001 5001 // with its attendant messy index arithmetic, and upgrade
5002 5002 // the copy to a more hardware-friendly word size of 64 bits.
5003 5003 Node* tail_ctl = NULL;
5004 5004 if (!stopped() && !dest_tail->eqv_uncast(dest_length)) {
5005 5005 Node* cmp_lt = _gvn.transform(new(C) CmpINode(dest_tail, dest_length));
5006 5006 Node* bol_lt = _gvn.transform(new(C) BoolNode(cmp_lt, BoolTest::lt));
5007 5007 tail_ctl = generate_slow_guard(bol_lt, NULL);
5008 5008 assert(tail_ctl != NULL || !stopped(), "must be an outcome");
5009 5009 }
5010 5010
5011 5011 // At this point, let's assume there is no tail.
5012 5012 if (!stopped() && alloc != NULL && basic_elem_type != T_OBJECT) {
5013 5013 // There is no tail. Try an upgrade to a 64-bit copy.
5014 5014 bool didit = false;
5015 5015 { PreserveJVMState pjvms(this);
5016 5016 didit = generate_block_arraycopy(adr_type, basic_elem_type, alloc,
5017 5017 src, src_offset, dest, dest_offset,
5018 5018 dest_size, dest_uninitialized);
5019 5019 if (didit) {
5020 5020 // Present the results of the block-copying fast call.
5021 5021 result_region->init_req(bcopy_path, control());
5022 5022 result_i_o ->init_req(bcopy_path, i_o());
5023 5023 result_memory->init_req(bcopy_path, memory(adr_type));
5024 5024 }
5025 5025 }
5026 5026 if (didit)
5027 5027 set_control(top()); // no regular fast path
5028 5028 }
5029 5029
5030 5030 // Clear the tail, if any.
5031 5031 if (tail_ctl != NULL) {
5032 5032 Node* notail_ctl = stopped() ? NULL : control();
5033 5033 set_control(tail_ctl);
5034 5034 if (notail_ctl == NULL) {
5035 5035 generate_clear_array(adr_type, dest, basic_elem_type,
5036 5036 dest_tail, NULL,
5037 5037 dest_size);
5038 5038 } else {
5039 5039 // Make a local merge.
5040 5040 Node* done_ctl = new(C) RegionNode(3);
5041 5041 Node* done_mem = new(C) PhiNode(done_ctl, Type::MEMORY, adr_type);
5042 5042 done_ctl->init_req(1, notail_ctl);
5043 5043 done_mem->init_req(1, memory(adr_type));
5044 5044 generate_clear_array(adr_type, dest, basic_elem_type,
5045 5045 dest_tail, NULL,
5046 5046 dest_size);
5047 5047 done_ctl->init_req(2, control());
5048 5048 done_mem->init_req(2, memory(adr_type));
5049 5049 set_control( _gvn.transform(done_ctl));
5050 5050 set_memory( _gvn.transform(done_mem), adr_type );
5051 5051 }
5052 5052 }
5053 5053 }
5054 5054
5055 5055 BasicType copy_type = basic_elem_type;
5056 5056 assert(basic_elem_type != T_ARRAY, "caller must fix this");
5057 5057 if (!stopped() && copy_type == T_OBJECT) {
5058 5058 // If src and dest have compatible element types, we can copy bits.
5059 5059 // Types S[] and D[] are compatible if D is a supertype of S.
5060 5060 //
5061 5061 // If they are not, we will use checked_oop_disjoint_arraycopy,
5062 5062 // which performs a fast optimistic per-oop check, and backs off
5063 5063 // further to JVM_ArrayCopy on the first per-oop check that fails.
5064 5064 // (Actually, we don't move raw bits only; the GC requires card marks.)
5065 5065
5066 5066 // Get the Klass* for both src and dest
5067 5067 Node* src_klass = load_object_klass(src);
5068 5068 Node* dest_klass = load_object_klass(dest);
5069 5069
5070 5070 // Generate the subtype check.
5071 5071 // This might fold up statically, or then again it might not.
5072 5072 //
5073 5073 // Non-static example: Copying List<String>.elements to a new String[].
5074 5074 // The backing store for a List<String> is always an Object[],
5075 5075 // but its elements are always type String, if the generic types
5076 5076 // are correct at the source level.
5077 5077 //
5078 5078 // Test S[] against D[], not S against D, because (probably)
5079 5079 // the secondary supertype cache is less busy for S[] than S.
5080 5080 // This usually only matters when D is an interface.
5081 5081 Node* not_subtype_ctrl = gen_subtype_check(src_klass, dest_klass);
5082 5082 // Plug failing path into checked_oop_disjoint_arraycopy
5083 5083 if (not_subtype_ctrl != top()) {
5084 5084 PreserveJVMState pjvms(this);
5085 5085 set_control(not_subtype_ctrl);
5086 5086 // (At this point we can assume disjoint_bases, since types differ.)
5087 5087 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
5088 5088 Node* p1 = basic_plus_adr(dest_klass, ek_offset);
5089 5089 Node* n1 = LoadKlassNode::make(_gvn, immutable_memory(), p1, TypeRawPtr::BOTTOM);
5090 5090 Node* dest_elem_klass = _gvn.transform(n1);
5091 5091 Node* cv = generate_checkcast_arraycopy(adr_type,
5092 5092 dest_elem_klass,
5093 5093 src, src_offset, dest, dest_offset,
5094 5094 ConvI2X(copy_length), dest_uninitialized);
5095 5095 if (cv == NULL) cv = intcon(-1); // failure (no stub available)
5096 5096 checked_control = control();
5097 5097 checked_i_o = i_o();
5098 5098 checked_mem = memory(adr_type);
5099 5099 checked_value = cv;
5100 5100 }
5101 5101 // At this point we know we do not need type checks on oop stores.
5102 5102
5103 5103 // Let's see if we need card marks:
5104 5104 if (alloc != NULL && use_ReduceInitialCardMarks()) {
5105 5105 // If we do not need card marks, copy using the jint or jlong stub.
5106 5106 copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
5107 5107 assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
5108 5108 "sizes agree");
5109 5109 }
5110 5110 }
5111 5111
5112 5112 if (!stopped()) {
5113 5113 // Generate the fast path, if possible.
5114 5114 PreserveJVMState pjvms(this);
5115 5115 generate_unchecked_arraycopy(adr_type, copy_type, disjoint_bases,
5116 5116 src, src_offset, dest, dest_offset,
5117 5117 ConvI2X(copy_length), dest_uninitialized);
5118 5118
5119 5119 // Present the results of the fast call.
5120 5120 result_region->init_req(fast_path, control());
5121 5121 result_i_o ->init_req(fast_path, i_o());
5122 5122 result_memory->init_req(fast_path, memory(adr_type));
5123 5123 }
5124 5124
5125 5125 // Here are all the slow paths up to this point, in one bundle:
5126 5126 slow_control = top();
5127 5127 if (slow_region != NULL)
5128 5128 slow_control = _gvn.transform(slow_region);
5129 5129 DEBUG_ONLY(slow_region = (RegionNode*)badAddress);
5130 5130
5131 5131 set_control(checked_control);
5132 5132 if (!stopped()) {
5133 5133 // Clean up after the checked call.
5134 5134 // The returned value is either 0 or -1^K,
5135 5135 // where K = number of partially transferred array elements.
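    // (Worked example: if the stub stored K == 3 elements before a failing
    // check, it returned ~3 == -4; XOR-ing that value with -1 in the code
    // below recovers K == 3, the point where the slow path must resume.)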
5136 5136 Node* cmp = _gvn.transform(new(C) CmpINode(checked_value, intcon(0)));
5137 5137 Node* bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::eq));
5138 5138 IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);
5139 5139
5140 5140 // If it is 0, we are done, so transfer to the end.
5141 5141 Node* checks_done = _gvn.transform(new(C) IfTrueNode(iff));
5142 5142 result_region->init_req(checked_path, checks_done);
5143 5143 result_i_o ->init_req(checked_path, checked_i_o);
5144 5144 result_memory->init_req(checked_path, checked_mem);
5145 5145
5146 5146 // If it is not zero, merge into the slow call.
5147 5147 set_control( _gvn.transform(new(C) IfFalseNode(iff) ));
5148 5148 RegionNode* slow_reg2 = new(C) RegionNode(3);
5149 5149 PhiNode* slow_i_o2 = new(C) PhiNode(slow_reg2, Type::ABIO);
5150 5150 PhiNode* slow_mem2 = new(C) PhiNode(slow_reg2, Type::MEMORY, adr_type);
5151 5151 record_for_igvn(slow_reg2);
5152 5152 slow_reg2 ->init_req(1, slow_control);
5153 5153 slow_i_o2 ->init_req(1, slow_i_o);
5154 5154 slow_mem2 ->init_req(1, slow_mem);
5155 5155 slow_reg2 ->init_req(2, control());
5156 5156 slow_i_o2 ->init_req(2, checked_i_o);
5157 5157 slow_mem2 ->init_req(2, checked_mem);
5158 5158
5159 5159 slow_control = _gvn.transform(slow_reg2);
5160 5160 slow_i_o = _gvn.transform(slow_i_o2);
5161 5161 slow_mem = _gvn.transform(slow_mem2);
5162 5162
5163 5163 if (alloc != NULL) {
5164 5164 // We'll restart from the very beginning, after zeroing the whole thing.
5165 5165 // This can cause double writes, but that's OK since dest is brand new.
5166 5166 // So we ignore the low 31 bits of the value returned from the stub.
5167 5167 } else {
5168 5168 // We must continue the copy exactly where it failed, or else
5169 5169 // another thread might see the wrong number of writes to dest.
5170 5170 Node* checked_offset = _gvn.transform(new(C) XorINode(checked_value, intcon(-1)));
5171 5171 Node* slow_offset = new(C) PhiNode(slow_reg2, TypeInt::INT);
5172 5172 slow_offset->init_req(1, intcon(0));
5173 5173 slow_offset->init_req(2, checked_offset);
5174 5174 slow_offset = _gvn.transform(slow_offset);
5175 5175
5176 5176 // Adjust the arguments by the conditionally incoming offset.
5177 5177 Node* src_off_plus = _gvn.transform(new(C) AddINode(src_offset, slow_offset));
5178 5178 Node* dest_off_plus = _gvn.transform(new(C) AddINode(dest_offset, slow_offset));
5179 5179 Node* length_minus = _gvn.transform(new(C) SubINode(copy_length, slow_offset));
5180 5180
5181 5181 // Tweak the node variables to adjust the code produced below:
5182 5182 src_offset = src_off_plus;
5183 5183 dest_offset = dest_off_plus;
5184 5184 copy_length = length_minus;
5185 5185 }
5186 5186 }
5187 5187
5188 5188 set_control(slow_control);
5189 5189 if (!stopped()) {
5190 5190 // Generate the slow path, if needed.
5191 5191 PreserveJVMState pjvms(this); // replace_in_map may trash the map
5192 5192
5193 5193 set_memory(slow_mem, adr_type);
5194 5194 set_i_o(slow_i_o);
5195 5195
5196 5196 if (dest_uninitialized) {
5197 5197 generate_clear_array(adr_type, dest, basic_elem_type,
5198 5198 intcon(0), NULL,
5199 5199 alloc->in(AllocateNode::AllocSize));
5200 5200 }
5201 5201
5202 5202 generate_slow_arraycopy(adr_type,
5203 5203 src, src_offset, dest, dest_offset,
5204 5204 copy_length, /*dest_uninitialized*/false);
5205 5205
5206 5206 result_region->init_req(slow_call_path, control());
5207 5207 result_i_o ->init_req(slow_call_path, i_o());
5208 5208 result_memory->init_req(slow_call_path, memory(adr_type));
5209 5209 }
5210 5210
5211 5211 // Remove unused edges.
5212 5212 for (uint i = 1; i < result_region->req(); i++) {
5213 5213 if (result_region->in(i) == NULL)
5214 5214 result_region->init_req(i, top());
5215 5215 }
5216 5216
5217 5217 // Finished; return the combined state.
5218 5218 set_control( _gvn.transform(result_region));
5219 5219 set_i_o( _gvn.transform(result_i_o) );
5220 5220 set_memory( _gvn.transform(result_memory), adr_type );
5221 5221
5222 5222 // The memory edges above are precise in order to model effects around
5223 5223 // array copies accurately to allow value numbering of field loads around
5224 5224 // arraycopy. Such field loads, both before and after, are common in Java
5225 5225 // collections and similar classes involving header/array data structures.
5226 5226 //
5227 5227 // But with a low number of registers, or when some registers are used or killed
5228 5228 // by the arraycopy calls, this causes register spilling on the stack. See 6544710.
5229 5229 // The next memory barrier is added to avoid it. If the arraycopy can be
5230 5230 // optimized away (which it can, sometimes) then we can manually remove
5231 5231 // the membar also.
5232 5232 //
5233 5233 // Do not let reads from the cloned object float above the arraycopy.
5234 5234 if (alloc != NULL) {
5235 5235 // Do not let stores that initialize this object be reordered with
5236 5236 // a subsequent store that would make this object accessible by
5237 5237 // other threads.
5238 5238 // Record what AllocateNode this StoreStore protects so that
5239 5239 // escape analysis can go from the MemBarStoreStoreNode to the
5240 5240 // AllocateNode and eliminate the MemBarStoreStoreNode if possible
5241 5241 // based on the escape status of the AllocateNode.
5242 5242 insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
5243 5243 } else if (InsertMemBarAfterArraycopy)
5244 5244 insert_mem_bar(Op_MemBarCPUOrder);
5245 5245 }
5246 5246
5247 5247
5248 5248 // Helper function which determines if an arraycopy immediately follows
5249 5249 // an allocation, with no intervening tests or other escapes for the object.
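// (The canonical shape matched here is an allocation immediately followed
// by the copy, e.g. a = new T[n]; System.arraycopy(src, 0, a, 0, k);
// with no other uses of 'a' in between.)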
5250 5250 AllocateArrayNode*
5251 5251 LibraryCallKit::tightly_coupled_allocation(Node* ptr,
5252 5252 RegionNode* slow_region) {
5253 5253 if (stopped()) return NULL; // no fast path
5254 5254 if (C->AliasLevel() == 0) return NULL; // no MergeMems around
5255 5255
5256 5256 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, &_gvn);
5257 5257 if (alloc == NULL) return NULL;
5258 5258
5259 5259 Node* rawmem = memory(Compile::AliasIdxRaw);
5260 5260 // Is the allocation's memory state untouched?
5261 5261 if (!(rawmem->is_Proj() && rawmem->in(0)->is_Initialize())) {
5262 5262 // Bail out if there have been raw-memory effects since the allocation.
5263 5263 // (Example: There might have been a call or safepoint.)
5264 5264 return NULL;
5265 5265 }
5266 5266 rawmem = rawmem->in(0)->as_Initialize()->memory(Compile::AliasIdxRaw);
5267 5267 if (!(rawmem->is_Proj() && rawmem->in(0) == alloc)) {
5268 5268 return NULL;
5269 5269 }
5270 5270
5271 5271 // There must be no unexpected observers of this allocation.
5272 5272 for (DUIterator_Fast imax, i = ptr->fast_outs(imax); i < imax; i++) {
5273 5273 Node* obs = ptr->fast_out(i);
5274 5274 if (obs != this->map()) {
5275 5275 return NULL;
5276 5276 }
5277 5277 }
5278 5278
5279 5279 // This arraycopy must unconditionally follow the allocation of the ptr.
5280 5280 Node* alloc_ctl = ptr->in(0);
5281 5281 assert(just_allocated_object(alloc_ctl) == ptr, "most recent allocation");
5282 5282
5283 5283 Node* ctl = control();
5284 5284 while (ctl != alloc_ctl) {
5285 5285 // There may be guards which feed into the slow_region.
5286 5286 // Any other control flow means that we might not get a chance
5287 5287 // to finish initializing the allocated object.
5288 5288 if ((ctl->is_IfFalse() || ctl->is_IfTrue()) && ctl->in(0)->is_If()) {
5289 5289 IfNode* iff = ctl->in(0)->as_If();
5290 5290 Node* not_ctl = iff->proj_out(1 - ctl->as_Proj()->_con);
5291 5291 assert(not_ctl != NULL && not_ctl != ctl, "found alternate");
5292 5292 if (slow_region != NULL && slow_region->find_edge(not_ctl) >= 1) {
5293 5293 ctl = iff->in(0); // This test feeds the known slow_region.
5294 5294 continue;
5295 5295 }
5296 5296 // One more try: Various low-level checks bottom out in
5297 5297 // uncommon traps. If the debug-info of the trap omits
5298 5298 // any reference to the allocation, as we've already
5299 5299 // observed, then there can be no objection to the trap.
5300 5300 bool found_trap = false;
5301 5301 for (DUIterator_Fast jmax, j = not_ctl->fast_outs(jmax); j < jmax; j++) {
5302 5302 Node* obs = not_ctl->fast_out(j);
5303 5303 if (obs->in(0) == not_ctl && obs->is_Call() &&
5304 5304 (obs->as_Call()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point())) {
5305 5305 found_trap = true; break;
5306 5306 }
5307 5307 }
5308 5308 if (found_trap) {
5309 5309 ctl = iff->in(0); // This test feeds a harmless uncommon trap.
5310 5310 continue;
5311 5311 }
5312 5312 }
5313 5313 return NULL;
5314 5314 }
5315 5315
5316 5316 // If we get this far, we have an allocation which immediately
5317 5317 // precedes the arraycopy, and we can take over zeroing the new object.
5318 5318 // The arraycopy will finish the initialization, and provide
5319 5319 // a new control state to which we will anchor the destination pointer.
5320 5320
5321 5321 return alloc;
5322 5322 }
5323 5323
5324 5324 // Helper for initialization of arrays, creating a ClearArray.
5325 5325 // It writes zero bits in [start..end), within the body of an array object.
5326 5326 // The memory effects are all chained onto the 'adr_type' alias category.
5327 5327 //
5328 5328 // Since the object is otherwise uninitialized, we are free
5329 5329 // to put a little "slop" around the edges of the cleared area,
5330 5330 // as long as it does not go back into the array's header,
5331 5331 // or beyond the array end within the heap.
5332 5332 //
5333 5333 // The lower edge can be rounded down to the nearest jint and the
5334 5334 // upper edge can be rounded up to the nearest MinObjAlignmentInBytes.
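// (For example, a clear starting at byte offset 13 may round its start down
// to 12, and one ending at byte offset 21 may round its end up to 24; the
// slop bytes lie in the still-uninitialized body, so zeroing them is harmless.)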
5335 5335 //
5336 5336 // Arguments:
5337 5337 // adr_type memory slice where writes are generated
5338 5338 // dest oop of the destination array
5339 5339 // basic_elem_type element type of the destination
5340 5340 // slice_idx array index of first element to store
5341 5341 // slice_len number of elements to store (or NULL)
5342 5342 // dest_size total size in bytes of the array object
5343 5343 //
5344 5344 // Exactly one of slice_len or dest_size must be non-NULL.
5345 5345 // If dest_size is non-NULL, zeroing extends to the end of the object.
5346 5346 // If slice_len is non-NULL, the slice_idx value must be a constant.
5347 5347 void
5348 5348 LibraryCallKit::generate_clear_array(const TypePtr* adr_type,
5349 5349 Node* dest,
5350 5350 BasicType basic_elem_type,
5351 5351 Node* slice_idx,
5352 5352 Node* slice_len,
5353 5353 Node* dest_size) {
5354 5354 // one or the other but not both of slice_len and dest_size:
5355 5355 assert((slice_len != NULL? 1: 0) + (dest_size != NULL? 1: 0) == 1, "");
5356 5356 if (slice_len == NULL) slice_len = top();
5357 5357 if (dest_size == NULL) dest_size = top();
5358 5358
5359 5359 // operate on this memory slice:
5360 5360 Node* mem = memory(adr_type); // memory slice to operate on
5361 5361
5362 5362 // scaling and rounding of indexes:
5363 5363 int scale = exact_log2(type2aelembytes(basic_elem_type));
5364 5364 int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
5365 5365 int clear_low = (-1 << scale) & (BytesPerInt - 1);
5366 5366 int bump_bit = (-1 << scale) & BytesPerInt;
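  // (For example, T_SHORT has scale == 1, giving clear_low == (-2 & 3) == 2
  // and bump_bit == (-2 & 4) == 4; for T_LONG, scale == 3 makes both zero.)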
5367 5367
5368 5368 // determine constant starts and ends
5369 5369 const intptr_t BIG_NEG = -128;
5370 5370 assert(BIG_NEG + 2*abase < 0, "neg enough");
5371 5371 intptr_t slice_idx_con = (intptr_t) find_int_con(slice_idx, BIG_NEG);
5372 5372 intptr_t slice_len_con = (intptr_t) find_int_con(slice_len, BIG_NEG);
5373 5373 if (slice_len_con == 0) {
5374 5374 return; // nothing to do here
5375 5375 }
5376 5376 intptr_t start_con = (abase + (slice_idx_con << scale)) & ~clear_low;
5377 5377 intptr_t end_con = find_intptr_t_con(dest_size, -1);
5378 5378 if (slice_idx_con >= 0 && slice_len_con >= 0) {
5379 5379 assert(end_con < 0, "not two cons");
5380 5380 end_con = round_to(abase + ((slice_idx_con + slice_len_con) << scale),
5381 5381 BytesPerLong);
5382 5382 }
5383 5383
5384 5384 if (start_con >= 0 && end_con >= 0) {
5385 5385 // Constant start and end. Simple.
5386 5386 mem = ClearArrayNode::clear_memory(control(), mem, dest,
5387 5387 start_con, end_con, &_gvn);
5388 5388 } else if (start_con >= 0 && dest_size != top()) {
5389 5389 // Constant start, pre-rounded end after the tail of the array.
5390 5390 Node* end = dest_size;
5391 5391 mem = ClearArrayNode::clear_memory(control(), mem, dest,
5392 5392 start_con, end, &_gvn);
5393 5393 } else if (start_con >= 0 && slice_len != top()) {
5394 5394 // Constant start, non-constant end. End needs rounding up.
5395 5395 // End offset = round_up(abase + ((slice_idx_con + slice_len) << scale), 8)
5396 5396 intptr_t end_base = abase + (slice_idx_con << scale);
5397 5397 int end_round = (-1 << scale) & (BytesPerLong - 1);
5398 5398 Node* end = ConvI2X(slice_len);
5399 5399 if (scale != 0)
5400 5400 end = _gvn.transform(new(C) LShiftXNode(end, intcon(scale) ));
5401 5401 end_base += end_round;
5402 5402 end = _gvn.transform(new(C) AddXNode(end, MakeConX(end_base)));
5403 5403 end = _gvn.transform(new(C) AndXNode(end, MakeConX(~end_round)));
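    // (Since 'end' is a multiple of 1 << scale, adding end_round and then
    // clearing those low bits rounds it up to the next multiple of
    // BytesPerLong: with scale == 2, end_round == 4, so (20 + 4) & ~4 == 24
    // and (24 + 4) & ~4 == 24.)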
5404 5404 mem = ClearArrayNode::clear_memory(control(), mem, dest,
5405 5405 start_con, end, &_gvn);
5406 5406 } else if (start_con < 0 && dest_size != top()) {
5407 5407 // Non-constant start, pre-rounded end after the tail of the array.
5408 5408 // This is almost certainly a "round-to-end" operation.
5409 5409 Node* start = slice_idx;
5410 5410 start = ConvI2X(start);
5411 5411 if (scale != 0)
5412 5412 start = _gvn.transform(new(C) LShiftXNode( start, intcon(scale) ));
5413 5413 start = _gvn.transform(new(C) AddXNode(start, MakeConX(abase)));
5414 5414 if ((bump_bit | clear_low) != 0) {
5415 5415 int to_clear = (bump_bit | clear_low);
5416 5416 // Align up mod 8, then store a jint zero unconditionally
5417 5417 // just before the mod-8 boundary.
5418 5418 if (((abase + bump_bit) & ~to_clear) - bump_bit
5419 5419 < arrayOopDesc::length_offset_in_bytes() + BytesPerInt) {
5420 5420 bump_bit = 0;
5421 5421 assert((abase & to_clear) == 0, "array base must be long-aligned");
5422 5422 } else {
5423 5423 // Bump 'start' up to (or past) the next jint boundary:
5424 5424 start = _gvn.transform(new(C) AddXNode(start, MakeConX(bump_bit)));
5425 5425 assert((abase & clear_low) == 0, "array base must be int-aligned");
5426 5426 }
5427 5427 // Round bumped 'start' down to jlong boundary in body of array.
5428 5428 start = _gvn.transform(new(C) AndXNode(start, MakeConX(~to_clear)));
5429 5429 if (bump_bit != 0) {
5430 5430 // Store a zero to the immediately preceding jint:
5431 5431 Node* x1 = _gvn.transform(new(C) AddXNode(start, MakeConX(-bump_bit)));
5432 5432 Node* p1 = basic_plus_adr(dest, x1);
5433 5433 mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT, MemNode::unordered);
5434 5434 mem = _gvn.transform(mem);
5435 5435 }
5436 5436 }
5437 5437 Node* end = dest_size; // pre-rounded
5438 5438 mem = ClearArrayNode::clear_memory(control(), mem, dest,
5439 5439 start, end, &_gvn);
5440 5440 } else {
5441 5441 // Non-constant start, unrounded non-constant end.
5442 5442 // (Nobody zeroes a random midsection of an array using this routine.)
5443 5443 ShouldNotReachHere(); // fix caller
5444 5444 }
5445 5445
5446 5446 // Done.
5447 5447 set_memory(mem, adr_type);
5448 5448 }
5449 5449
5450 5450
5451 5451 bool
5452 5452 LibraryCallKit::generate_block_arraycopy(const TypePtr* adr_type,
5453 5453 BasicType basic_elem_type,
5454 5454 AllocateNode* alloc,
5455 5455 Node* src, Node* src_offset,
5456 5456 Node* dest, Node* dest_offset,
5457 5457 Node* dest_size, bool dest_uninitialized) {
5458 5458 // See if there is an advantage from block transfer.
5459 5459 int scale = exact_log2(type2aelembytes(basic_elem_type));
5460 5460 if (scale >= LogBytesPerLong)
5461 5461 return false; // it is already a block transfer
5462 5462
5463 5463 // Look at the alignment of the starting offsets.
5464 5464 int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
5465 5465
5466 5466 intptr_t src_off_con = (intptr_t) find_int_con(src_offset, -1);
5467 5467 intptr_t dest_off_con = (intptr_t) find_int_con(dest_offset, -1);
5468 5468 if (src_off_con < 0 || dest_off_con < 0)
5469 5469 // At present, we can only understand constants.
5470 5470 return false;
5471 5471
5472 5472 intptr_t src_off = abase + (src_off_con << scale);
5473 5473 intptr_t dest_off = abase + (dest_off_con << scale);
5474 5474
5475 5475 if (((src_off | dest_off) & (BytesPerLong-1)) != 0) {
5476 5476 // Non-aligned; too bad.
5477 5477 // One more chance: Pick off an initial 32-bit word.
5478 5478 // This is a common case, since abase can be odd mod 8.
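    // (For example, with abase == 12 and scale == 2, offsets 12 and 20 are
    // both 4 mod 8; copying a single jint from each start leaves both
    // long-aligned, at 16 and 24.)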
5479 5479 if (((src_off | dest_off) & (BytesPerLong-1)) == BytesPerInt &&
5480 5480 ((src_off ^ dest_off) & (BytesPerLong-1)) == 0) {
5481 5481 Node* sptr = basic_plus_adr(src, src_off);
5482 5482 Node* dptr = basic_plus_adr(dest, dest_off);
5483 5483 Node* sval = make_load(control(), sptr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
5484 5484 store_to_memory(control(), dptr, sval, T_INT, adr_type, MemNode::unordered);
5485 5485 src_off += BytesPerInt;
5486 5486 dest_off += BytesPerInt;
5487 5487 } else {
5488 5488 return false;
5489 5489 }
5490 5490 }
5491 5491 assert(src_off % BytesPerLong == 0, "");
5492 5492 assert(dest_off % BytesPerLong == 0, "");
5493 5493
5494 5494 // Do this copy by giant steps.
5495 5495 Node* sptr = basic_plus_adr(src, src_off);
5496 5496 Node* dptr = basic_plus_adr(dest, dest_off);
5497 5497 Node* countx = dest_size;
5498 5498 countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(dest_off)));
5499 5499 countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong)));
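  // countx is now (dest_size - dest_off) >> LogBytesPerLong, i.e. the number
  // of 8-byte words from the copy start to the end of the object.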
5500 5500
5501 5501 bool disjoint_bases = true; // since alloc != NULL
5502 5502 generate_unchecked_arraycopy(adr_type, T_LONG, disjoint_bases,
5503 5503 sptr, NULL, dptr, NULL, countx, dest_uninitialized);
5504 5504
5505 5505 return true;
5506 5506 }
5507 5507
5508 5508
5509 5509 // Helper function; generates code for the slow case.
5510 5510 // We make a call to a runtime method which emulates the native method,
5511 5511 // but without the native wrapper overhead.
5512 5512 void
5513 5513 LibraryCallKit::generate_slow_arraycopy(const TypePtr* adr_type,
5514 5514 Node* src, Node* src_offset,
5515 5515 Node* dest, Node* dest_offset,
5516 5516 Node* copy_length, bool dest_uninitialized) {
5517 5517 assert(!dest_uninitialized, "Invariant");
5518 5518 Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON,
5519 5519 OptoRuntime::slow_arraycopy_Type(),
5520 5520 OptoRuntime::slow_arraycopy_Java(),
5521 5521 "slow_arraycopy", adr_type,
5522 5522 src, src_offset, dest, dest_offset,
5523 5523 copy_length);
5524 5524
5525 5525 // Handle exceptions thrown by this fellow:
5526 5526 make_slow_call_ex(call, env()->Throwable_klass(), false);
5527 5527 }
5528 5528
5529 5529 // Helper function; generates code for cases requiring runtime checks.
5530 5530 Node*
5531 5531 LibraryCallKit::generate_checkcast_arraycopy(const TypePtr* adr_type,
5532 5532 Node* dest_elem_klass,
5533 5533 Node* src, Node* src_offset,
5534 5534 Node* dest, Node* dest_offset,
5535 5535 Node* copy_length, bool dest_uninitialized) {
5536 5536 if (stopped()) return NULL;
5537 5537
5538 5538 address copyfunc_addr = StubRoutines::checkcast_arraycopy(dest_uninitialized);
5539 5539 if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
5540 5540 return NULL;
5541 5541 }
5542 5542
5543 5543 // Pick out the parameters required to perform a store-check
5544 5544 // for the target array. This is an optimistic check. It will
5545 5545 // look in each non-null element's class, at the desired klass's
5546 5546 // super_check_offset, for the desired klass.
5547 5547 int sco_offset = in_bytes(Klass::super_check_offset_offset());
5548 5548 Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
5549 5549 Node* n3 = new(C) LoadINode(NULL, memory(p3), p3, _gvn.type(p3)->is_ptr(), TypeInt::INT, MemNode::unordered);
5550 5550 Node* check_offset = ConvI2X(_gvn.transform(n3));
5551 5551 Node* check_value = dest_elem_klass;
5552 5552
5553 5553 Node* src_start = array_element_address(src, src_offset, T_OBJECT);
5554 5554 Node* dest_start = array_element_address(dest, dest_offset, T_OBJECT);
5555 5555
5556 5556 // (We know the arrays are never conjoint, because their types differ.)
5557 5557 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
5558 5558 OptoRuntime::checkcast_arraycopy_Type(),
5559 5559 copyfunc_addr, "checkcast_arraycopy", adr_type,
5560 5560 // five arguments, of which two are
5561 5561 // intptr_t (jlong in LP64)
5562 5562 src_start, dest_start,
5563 5563 copy_length XTOP,
5564 5564 check_offset XTOP,
5565 5565 check_value);
5566 5566
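  // The stub returns 0 on success, or -1^K if it failed after copying
  // K elements; the caller's cleanup code decodes this.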
5567 5567 return _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
5568 5568 }
5569 5569
5570 5570
5571 5571 // Helper function; generates code for cases requiring runtime checks.
5572 5572 Node*
5573 5573 LibraryCallKit::generate_generic_arraycopy(const TypePtr* adr_type,
5574 5574 Node* src, Node* src_offset,
5575 5575 Node* dest, Node* dest_offset,
5576 5576 Node* copy_length, bool dest_uninitialized) {
5577 5577 assert(!dest_uninitialized, "Invariant");
5578 5578 if (stopped()) return NULL;
5579 5579 address copyfunc_addr = StubRoutines::generic_arraycopy();
5580 5580 if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
5581 5581 return NULL;
5582 5582 }
5583 5583
5584 5584 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
5585 5585 OptoRuntime::generic_arraycopy_Type(),
5586 5586 copyfunc_addr, "generic_arraycopy", adr_type,
5587 5587 src, src_offset, dest, dest_offset, copy_length);
5588 5588
5589 5589 return _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
5590 5590 }
5591 5591
5592 5592 // Helper function; generates the fast out-of-line call to an arraycopy stub.
5593 5593 void
5594 5594 LibraryCallKit::generate_unchecked_arraycopy(const TypePtr* adr_type,
5595 5595 BasicType basic_elem_type,
5596 5596 bool disjoint_bases,
5597 5597 Node* src, Node* src_offset,
5598 5598 Node* dest, Node* dest_offset,
5599 5599 Node* copy_length, bool dest_uninitialized) {
5600 5600 if (stopped()) return; // nothing to do
5601 5601
5602 5602 Node* src_start = src;
5603 5603 Node* dest_start = dest;
5604 5604 if (src_offset != NULL || dest_offset != NULL) {
5605 5605 assert(src_offset != NULL && dest_offset != NULL, "");
5606 5606 src_start = array_element_address(src, src_offset, basic_elem_type);
5607 5607 dest_start = array_element_address(dest, dest_offset, basic_elem_type);
5608 5608 }
5609 5609
5610 5610 // Figure out which arraycopy runtime method to call.
5611 5611 const char* copyfunc_name = "arraycopy";
5612 5612 address copyfunc_addr =
5613 5613 basictype2arraycopy(basic_elem_type, src_offset, dest_offset,
5614 5614 disjoint_bases, copyfunc_name, dest_uninitialized);
5615 5615
5616 5616 // Call it. Note that the count_ix value is not scaled to a byte-size.
5617 5617 make_runtime_call(RC_LEAF|RC_NO_FP,
5618 5618 OptoRuntime::fast_arraycopy_Type(),
5619 5619 copyfunc_addr, copyfunc_name, adr_type,
5620 5620 src_start, dest_start, copy_length XTOP);
5621 5621 }
5622 5622
5623 5623 //-------------inline_encodeISOArray-----------------------------------
5624 5624 // encode char[] to byte[] in ISO_8859_1
5625 5625 bool LibraryCallKit::inline_encodeISOArray() {
5626 5626 assert(callee()->signature()->size() == 5, "encodeISOArray has 5 parameters");
5627 5627 // no receiver since it is a static method
5628 5628 Node *src = argument(0);
5629 5629 Node *src_offset = argument(1);
5630 5630 Node *dst = argument(2);
5631 5631 Node *dst_offset = argument(3);
5632 5632 Node *length = argument(4);
5633 5633
5634 5634 const Type* src_type = src->Value(&_gvn);
5635 5635 const Type* dst_type = dst->Value(&_gvn);
5636 5636 const TypeAryPtr* top_src = src_type->isa_aryptr();
5637 5637 const TypeAryPtr* top_dest = dst_type->isa_aryptr();
5638 5638 if (top_src == NULL || top_src->klass() == NULL ||
5639 5639 top_dest == NULL || top_dest->klass() == NULL) {
5640 5640 // failed array check
5641 5641 return false;
5642 5642 }
5643 5643
5644 5644 // Figure out the size and type of the elements we will be copying.
5645 5645 BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5646 5646 BasicType dst_elem = dst_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5647 5647 if (src_elem != T_CHAR || dst_elem != T_BYTE) {
5648 5648 return false;
5649 5649 }
5650 5650 Node* src_start = array_element_address(src, src_offset, src_elem);
5651 5651 Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
5652 5652 // 'src_start' points to src array + scaled offset
5653 5653 // 'dst_start' points to dst array + scaled offset
5654 5654
5655 5655 const TypeAryPtr* mtype = TypeAryPtr::BYTES;
5656 5656 Node* enc = new (C) EncodeISOArrayNode(control(), memory(mtype), src_start, dst_start, length);
5657 5657 enc = _gvn.transform(enc);
5658 5658 Node* res_mem = _gvn.transform(new (C) SCMemProjNode(enc));
5659 5659 set_memory(res_mem, mtype);
5660 5660 set_result(enc);
5661 5661 return true;
5662 5662 }
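
The EncodeISOArray node stands in for a scalar encode loop. Here is a minimal
sketch of the semantics it implements, given the check above that the source is
a char[] and the destination a byte[] (names and types are illustrative, not
the JDK's):

    #include <cstdint>

    // Encode up to 'len' UTF-16 code units as ISO-8859-1 bytes, stopping at
    // the first char that does not fit in one byte; the return value is the
    // number of chars actually encoded, which is what the node produces.
    static int encode_iso_array(const uint16_t* src, uint8_t* dst, int len) {
      int i = 0;
      for (; i < len && src[i] <= 0xFF; i++) {
        dst[i] = (uint8_t)src[i];
      }
      return i;
    }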
5663 5663
5664 5664 /**
5665 5665 * Calculate CRC32 for byte.
5666 5666 * int java.util.zip.CRC32.update(int crc, int b)
5667 5667 */
5668 5668 bool LibraryCallKit::inline_updateCRC32() {
5669 5669 assert(UseCRC32Intrinsics, "need AVX and CLMUL instruction support");
5670 5670 assert(callee()->signature()->size() == 2, "update has 2 parameters");
5671 5671 // no receiver since it is a static method
5672 5672 Node* crc = argument(0); // type: int
5673 5673 Node* b = argument(1); // type: int
5674 5674
5675 5675 /*
5676 5676 * int c = ~ crc;
5677 5677 * b = timesXtoThe32[(b ^ c) & 0xFF];
5678 5678 * b = b ^ (c >>> 8);
5679 5679 * crc = ~b;
5680 5680 */
5681 5681
5682 5682 Node* M1 = intcon(-1);
5683 5683 crc = _gvn.transform(new (C) XorINode(crc, M1));
5684 5684 Node* result = _gvn.transform(new (C) XorINode(crc, b));
5685 5685 result = _gvn.transform(new (C) AndINode(result, intcon(0xFF)));
5686 5686
5687 5687 Node* base = makecon(TypeRawPtr::make(StubRoutines::crc_table_addr()));
5688 5688 Node* offset = _gvn.transform(new (C) LShiftINode(result, intcon(0x2)));
5689 5689 Node* adr = basic_plus_adr(top(), base, ConvI2X(offset));
5690 5690 result = make_load(control(), adr, TypeInt::INT, T_INT, MemNode::unordered);
5691 5691
5692 5692 crc = _gvn.transform(new (C) URShiftINode(crc, intcon(8)));
5693 5693 result = _gvn.transform(new (C) XorINode(crc, result));
5694 5694 result = _gvn.transform(new (C) XorINode(result, M1));
5695 5695 set_result(result);
5696 5696 return true;
5697 5697 }
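
The node graph above is the standard one-byte, table-driven update for the
reflected CRC-32 polynomial. Below is a stand-alone sketch in plain C++, with
a locally built table standing in for the one at StubRoutines::crc_table_addr()
(the 0xEDB88320 constant is the usual reflected polynomial, an assumption here):

    #include <cstdint>

    static uint32_t crc_table[256];

    // Build the byte-indexed lookup table once, zlib-style.
    static void init_crc_table() {
      for (uint32_t i = 0; i < 256; i++) {
        uint32_t c = i;
        for (int k = 0; k < 8; k++)
          c = (c & 1) ? 0xEDB88320u ^ (c >> 1) : (c >> 1);
        crc_table[i] = c;
      }
    }

    // Mirrors the intrinsic: complement, one table lookup on the low byte,
    // shift in the remaining bits, complement back.
    static int update_crc32(int crc, int b) {
      uint32_t c = ~(uint32_t)crc;
      c = crc_table[(c ^ (uint32_t)b) & 0xFFu] ^ (c >> 8);
      return (int)~c;
    }

Note that the intrinsic scales the table index by 4 (the LShiftINode by 0x2)
because each table entry is a 4-byte int.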
5698 5698
5699 5699 /**
5700 5700 * Calculate CRC32 for byte[] array.
5701 5701 * int java.util.zip.CRC32.updateBytes(int crc, byte[] buf, int off, int len)
5702 5702 */
5703 5703 bool LibraryCallKit::inline_updateBytesCRC32() {
5704 5704 assert(UseCRC32Intrinsics, "need AVX and CLMUL instruction support");
5705 5705 assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
5706 5706 // no receiver since it is a static method
5707 5707 Node* crc = argument(0); // type: int
5708 5708 Node* src = argument(1); // type: oop
5709 5709 Node* offset = argument(2); // type: int
5710 5710 Node* length = argument(3); // type: int
5711 5711
5712 5712 const Type* src_type = src->Value(&_gvn);
5713 5713 const TypeAryPtr* top_src = src_type->isa_aryptr();
5714 5714 if (top_src == NULL || top_src->klass() == NULL) {
5715 5715 // failed array check
5716 5716 return false;
5717 5717 }
5718 5718
5719 5719 // Figure out the size and type of the elements we will be copying.
5720 5720 BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5721 5721 if (src_elem != T_BYTE) {
5722 5722 return false;
5723 5723 }
5724 5724
5725 5725 // 'src_start' points to src array + scaled offset
5726 5726 Node* src_start = array_element_address(src, offset, src_elem);
5727 5727
5728 5728 // We assume that the range check is done by the caller.
5729 5729 // TODO: generate range check (offset+length < src.length) in debug VM.
5730 5730
5731 5731 // Call the stub.
5732 5732 address stubAddr = StubRoutines::updateBytesCRC32();
5733 5733 const char *stubName = "updateBytesCRC32";
5734 5734
5735 5735 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
5736 5736 stubAddr, stubName, TypePtr::BOTTOM,
5737 5737 crc, src_start, length);
5738 5738 Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
5739 5739 set_result(result);
5740 5740 return true;
5741 5741 }
5742 5742
5743 5743 /**
5744 5744 * Calculate CRC32 for ByteBuffer.
5745 5745 * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
5746 5746 */
5747 5747 bool LibraryCallKit::inline_updateByteBufferCRC32() {
5748 5748 assert(UseCRC32Intrinsics, "need AVX and CLMUL instruction support");
5749 5749 assert(callee()->signature()->size() == 5, "updateByteBuffer has 4 parameters and one is long");
5750 5750 // no receiver since it is a static method
5751 5751 Node* crc = argument(0); // type: int
5752 5752 Node* src = argument(1); // type: long
5753 5753 Node* offset = argument(3); // type: int
5754 5754 Node* length = argument(4); // type: int
5755 5755
5756 5756 src = ConvL2X(src); // adjust Java long to machine word
5757 5757 Node* base = _gvn.transform(new (C) CastX2PNode(src));
5758 5758 offset = ConvI2X(offset);
5759 5759
5760 5760 // 'src_start' points to src array + scaled offset
5761 5761 Node* src_start = basic_plus_adr(top(), base, offset);
5762 5762
5763 5763 // Call the stub.
5764 5764 address stubAddr = StubRoutines::updateBytesCRC32();
5765 5765 const char *stubName = "updateBytesCRC32";
5766 5766
5767 5767 Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
5768 5768 stubAddr, stubName, TypePtr::BOTTOM,
5769 5769 crc, src_start, length);
5770 5770 Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
5771 5771 set_result(result);
5772 5772 return true;
5773 5773 }
5774 5774
5775 5775 //----------------------------inline_reference_get----------------------------
5776 5776 // public T java.lang.ref.Reference.get();
5777 5777 bool LibraryCallKit::inline_reference_get() {
5778 5778 const int referent_offset = java_lang_ref_Reference::referent_offset;
5779 5779 guarantee(referent_offset > 0, "should have already been set");
5780 5780
5781 5781 // Get the argument:
5782 5782 Node* reference_obj = null_check_receiver();
5783 5783 if (stopped()) return true;
5784 5784
5785 5785 Node* adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);
5786 5786
5787 5787 ciInstanceKlass* klass = env()->Object_klass();
5788 5788 const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
5789 5789
5790 5790 Node* no_ctrl = NULL;
5791 5791 Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT, MemNode::unordered);
5792 5792
5793 5793 // Use the pre-barrier to record the value in the referent field
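      // (Without this, a G1/SATB concurrent mark could miss a referent that
      // is only reachable through this load; recording it as pre_val keeps
      // the object alive as if the field had been read normally.)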
5794 5794 pre_barrier(false /* do_load */,
5795 5795 control(),
5796 5796 NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
5797 5797 result /* pre_val */,
5798 5798 T_OBJECT);
5799 5799
5800 5800 // Add memory barrier to prevent commoning reads from this field
5801 5801 // across safepoint since GC can change its value.
5802 5802 insert_mem_bar(Op_MemBarCPUOrder);
5803 5803
5804 5804 set_result(result);
5805 5805 return true;
5806 5806 }
5807 5807
5808 5808
5809 5809 Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
5810 5810 bool is_exact=true, bool is_static=false) {
5811 5811
5812 5812 const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
5813 5813 assert(tinst != NULL, "obj is null");
5814 5814 assert(tinst->klass()->is_loaded(), "obj is not loaded");
5815 5815 assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
5816 5816
5817 5817 ciField* field = tinst->klass()->as_instance_klass()->get_field_by_name(ciSymbol::make(fieldName),
5818 5818 ciSymbol::make(fieldTypeString),
5819 5819 is_static);
5820 5820 if (field == NULL) return (Node *) NULL;
5821 5821 assert (field != NULL, "undefined field");
5822 5822
5823 5823 // Next code copied from Parse::do_get_xxx():
5824 5824
5825 5825 // Compute address and memory type.
5826 5826 int offset = field->offset_in_bytes();
5827 5827 bool is_vol = field->is_volatile();
5828 5828 ciType* field_klass = field->type();
5829 5829 assert(field_klass->is_loaded(), "should be loaded");
5830 5830 const TypePtr* adr_type = C->alias_type(field)->adr_type();
5831 5831 Node *adr = basic_plus_adr(fromObj, fromObj, offset);
5832 5832 BasicType bt = field->layout_type();
5833 5833
5834 5834 // Build the resultant type of the load
5835 5835 const Type *type = TypeOopPtr::make_from_klass(field_klass->as_klass());
5836 5836
5837 5837 // Build the load.
5838 5838 Node* loadedField = make_load(NULL, adr, type, bt, adr_type, MemNode::unordered, is_vol);
5839 5839 return loadedField;
5840 5840 }
5841 5841
5842 5842
5843 5843 //------------------------------inline_aescrypt_Block-----------------------
5844 5844 bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
5845 5845 address stubAddr;
5846 5846 const char *stubName;
5847 5847 assert(UseAES, "need AES instruction support");
5848 5848
5849 5849 switch(id) {
5850 5850 case vmIntrinsics::_aescrypt_encryptBlock:
5851 5851 stubAddr = StubRoutines::aescrypt_encryptBlock();
5852 5852 stubName = "aescrypt_encryptBlock";
5853 5853 break;
5854 5854 case vmIntrinsics::_aescrypt_decryptBlock:
5855 5855 stubAddr = StubRoutines::aescrypt_decryptBlock();
5856 5856 stubName = "aescrypt_decryptBlock";
5857 5857 break;
5858 5858 }
5859 5859 if (stubAddr == NULL) return false;
5860 5860
5861 5861 Node* aescrypt_object = argument(0);
5862 5862 Node* src = argument(1);
5863 5863 Node* src_offset = argument(2);
5864 5864 Node* dest = argument(3);
5865 5865 Node* dest_offset = argument(4);
5866 5866
5867 5867 // (1) src and dest are arrays.
5868 5868 const Type* src_type = src->Value(&_gvn);
5869 5869 const Type* dest_type = dest->Value(&_gvn);
5870 5870 const TypeAryPtr* top_src = src_type->isa_aryptr();
5871 5871 const TypeAryPtr* top_dest = dest_type->isa_aryptr();
5872 5872 assert (top_src != NULL && top_src->klass() != NULL && top_dest != NULL && top_dest->klass() != NULL, "args are strange");
5873 5873
5874 5874 // For this quick and dirty code we will skip all the checks;
5875 5875 // we are just trying to get the call to be generated.
5876 5876 Node* src_start = src;
5877 5877 Node* dest_start = dest;
5878 5878 if (src_offset != NULL || dest_offset != NULL) {
5879 5879 assert(src_offset != NULL && dest_offset != NULL, "");
5880 5880 src_start = array_element_address(src, src_offset, T_BYTE);
5881 5881 dest_start = array_element_address(dest, dest_offset, T_BYTE);
5882 5882 }
5883 5883
5884 5884 // now need to get the start of its expanded key array
5885 5885 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
5886 5886 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
5887 5887 if (k_start == NULL) return false;
5888 5888
5889 5889 if (Matcher::pass_original_key_for_aes()) {
5890 5890 // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
5891 5891 // compatibility issues between Java key expansion and SPARC crypto instructions
5892 5892 Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
5893 5893 if (original_k_start == NULL) return false;
5894 5894
5895 5895 // Call the stub.
5896 5896 make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
5897 5897 stubAddr, stubName, TypePtr::BOTTOM,
5898 5898 src_start, dest_start, k_start, original_k_start);
5899 5899 } else {
5900 5900 // Call the stub.
5901 5901 make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
5902 5902 stubAddr, stubName, TypePtr::BOTTOM,
5903 5903 src_start, dest_start, k_start);
5904 5904 }
5905 5905
5906 5906 return true;
5907 5907 }
5908 5908
5909 5909 //------------------------------inline_cipherBlockChaining_AESCrypt-----------------------
5910 5910 bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) {
5911 5911 address stubAddr;
5912 5912 const char *stubName;
5913 5913
5914 5914 assert(UseAES, "need AES instruction support");
5915 5915
5916 5916 switch(id) {
5917 5917 case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
5918 5918 stubAddr = StubRoutines::cipherBlockChaining_encryptAESCrypt();
5919 5919 stubName = "cipherBlockChaining_encryptAESCrypt";
5920 5920 break;
5921 5921 case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
5922 5922 stubAddr = StubRoutines::cipherBlockChaining_decryptAESCrypt();
5923 5923 stubName = "cipherBlockChaining_decryptAESCrypt";
5924 5924 break;
5925 5925 }
5926 5926 if (stubAddr == NULL) return false;
5927 5927
5928 5928 Node* cipherBlockChaining_object = argument(0);
5929 5929 Node* src = argument(1);
5930 5930 Node* src_offset = argument(2);
5931 5931 Node* len = argument(3);
5932 5932 Node* dest = argument(4);
5933 5933 Node* dest_offset = argument(5);
5934 5934
5935 5935 // (1) src and dest are arrays.
5936 5936 const Type* src_type = src->Value(&_gvn);
5937 5937 const Type* dest_type = dest->Value(&_gvn);
5938 5938 const TypeAryPtr* top_src = src_type->isa_aryptr();
5939 5939 const TypeAryPtr* top_dest = dest_type->isa_aryptr();
5940 5940 assert (top_src != NULL && top_src->klass() != NULL
5941 5941 && top_dest != NULL && top_dest->klass() != NULL, "args are strange");
5942 5942
5943 5943 // checks are the responsibility of the caller
5944 5944 Node* src_start = src;
5945 5945 Node* dest_start = dest;
5946 5946 if (src_offset != NULL || dest_offset != NULL) {
5947 5947 assert(src_offset != NULL && dest_offset != NULL, "");
5948 5948 src_start = array_element_address(src, src_offset, T_BYTE);
5949 5949 dest_start = array_element_address(dest, dest_offset, T_BYTE);
5950 5950 }
5951 5951
5952 5952 // if we are in this set of code, we "know" the embeddedCipher is an AESCrypt object
5953 5953 // (because of the predicated logic executed earlier).
5954 5954 // so we cast it here safely.
5955 5955 // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java
5956 5956
5957 5957 Node* embeddedCipherObj = load_field_from_object(cipherBlockChaining_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
5958 5958 if (embeddedCipherObj == NULL) return false;
5959 5959
5960 5960 // cast it to what we know it will be at runtime
5961 5961 const TypeInstPtr* tinst = _gvn.type(cipherBlockChaining_object)->isa_instptr();
5962 5962 assert(tinst != NULL, "CBC obj is null");
5963 5963 assert(tinst->klass()->is_loaded(), "CBC obj is not loaded");
5964 5964 ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
5965 5965 if (!klass_AESCrypt->is_loaded()) return false;
5966 5966
5967 5967 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
5968 5968 const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
5969 5969 const TypeOopPtr* xtype = aklass->as_instance_type();
5970 5970 Node* aescrypt_object = new(C) CheckCastPPNode(control(), embeddedCipherObj, xtype);
5971 5971 aescrypt_object = _gvn.transform(aescrypt_object);
5972 5972
5973 5973 // we need to get the start of the aescrypt_object's expanded key array
5974 5974 Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
5975 5975 if (k_start == NULL) return false;
5976 5976
5977 5977 // similarly, get the start address of the r vector
5978 5978 Node* objRvec = load_field_from_object(cipherBlockChaining_object, "r", "[B", /*is_exact*/ false);
5979 5979 if (objRvec == NULL) return false;
5980 5980 Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE);
5981 5981
5982 5982 Node* cbcCrypt;
5983 5983 if (Matcher::pass_original_key_for_aes()) {
5984 5984 // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
5985 5985 // compatibility issues between Java key expansion and SPARC crypto instructions
5986 5986 Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
5987 5987 if (original_k_start == NULL) return false;
5988 5988
5989 5989 // Call the stub, passing src_start, dest_start, k_start, r_start, src_len and original_k_start
5990 5990 cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
5991 5991 OptoRuntime::cipherBlockChaining_aescrypt_Type(),
5992 5992 stubAddr, stubName, TypePtr::BOTTOM,
5993 5993 src_start, dest_start, k_start, r_start, len, original_k_start);
5994 5994 } else {
5995 5995 // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
5996 5996 cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
5997 5997 OptoRuntime::cipherBlockChaining_aescrypt_Type(),
5998 5998 stubAddr, stubName, TypePtr::BOTTOM,
5999 5999 src_start, dest_start, k_start, r_start, len);
6000 6000 }
6001 6001
6002 6002 // return cipher length (int)
6003 6003 Node* retvalue = _gvn.transform(new (C) ProjNode(cbcCrypt, TypeFunc::Parms));
6004 6004 set_result(retvalue);
6005 6005 return true;
6006 6006 }
6007 6007
6008 6008 //------------------------------get_key_start_from_aescrypt_object-----------------------
6009 6009 Node * LibraryCallKit::get_key_start_from_aescrypt_object(Node *aescrypt_object) {
6010 6010 Node* objAESCryptKey = load_field_from_object(aescrypt_object, "K", "[I", /*is_exact*/ false);
6011 6011 assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
6012 6012 if (objAESCryptKey == NULL) return (Node *) NULL;
6013 6013
6014 6014 // now have the array, need to get the start address of the K array
6015 6015 Node* k_start = array_element_address(objAESCryptKey, intcon(0), T_INT);
6016 6016 return k_start;
6017 6017 }
6018 6018
6019 6019 //------------------------------get_original_key_start_from_aescrypt_object-----------------------
6020 6020 Node * LibraryCallKit::get_original_key_start_from_aescrypt_object(Node *aescrypt_object) {
6021 6021 Node* objAESCryptKey = load_field_from_object(aescrypt_object, "lastKey", "[B", /*is_exact*/ false);
6022 6022 assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
6023 6023 if (objAESCryptKey == NULL) return (Node *) NULL;
6024 6024
6025 6025 // now have the array, need to get the start address of the lastKey array
6026 6026 Node* original_k_start = array_element_address(objAESCryptKey, intcon(0), T_BYTE);
6027 6027 return original_k_start;
6028 6028 }
6029 6029
6030 6030 //----------------------------inline_cipherBlockChaining_AESCrypt_predicate----------------------------
6031 6031 // Return node representing slow path of predicate check.
6032 6032 // the pseudo code we want to emulate with this predicate is:
6033 6033 // for encryption:
6034 6034 // if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
6035 6035 // for decryption:
6036 6036 // if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
6037 6037 // note cipher==plain is more conservative than the original java code but that's OK
6038 6038 //
6039 6039 Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting) {
6040 6040 // First, check the receiver for NULL since it is a virtual method.
6041 6041 Node* objCBC = argument(0);
6042 6042 objCBC = null_check(objCBC);
6043 6043
6044 6044 if (stopped()) return NULL; // Always NULL
6045 6045
6046 6046 // Load embeddedCipher field of CipherBlockChaining object.
6047 6047 Node* embeddedCipherObj = load_field_from_object(objCBC, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
6048 6048
6049 6049 // get AESCrypt klass for instanceOf check
6050 6050 // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point;
6051 6051 // it will have the same classloader as the CipherBlockChaining object.
6052 6052 const TypeInstPtr* tinst = _gvn.type(objCBC)->isa_instptr();
6053 6053 assert(tinst != NULL, "CBCobj is null");
6054 6054 assert(tinst->klass()->is_loaded(), "CBCobj is not loaded");
6055 6055
6056 6056 // we want to do an instanceof comparison against the AESCrypt class
6057 6057 ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
6058 6058 if (!klass_AESCrypt->is_loaded()) {
6059 6059 // if AESCrypt is not even loaded, we never take the intrinsic fast path
6060 6060 Node* ctrl = control();
6061 6061 set_control(top()); // no regular fast path
6062 6062 return ctrl;
6063 6063 }
6064 6064 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
6065 6065
6066 6066 Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
6067 6067 Node* cmp_instof = _gvn.transform(new (C) CmpINode(instof, intcon(1)));
6068 6068 Node* bool_instof = _gvn.transform(new (C) BoolNode(cmp_instof, BoolTest::ne));
6069 6069
6070 6070 Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);
6071 6071
6072 6072 // for encryption, we are done
6073 6073 if (!decrypting)
6074 6074 return instof_false; // even if it is NULL
6075 6075
6076 6076 // for decryption, we need to add a further check to avoid
6077 6077 // taking the intrinsic path when cipher and plain are the same
6078 6078 // see the original java code for why.
6079 6079 RegionNode* region = new(C) RegionNode(3);
6080 6080 region->init_req(1, instof_false);
6081 6081 Node* src = argument(1);
6082 6082 Node* dest = argument(4);
6083 6083 Node* cmp_src_dest = _gvn.transform(new (C) CmpPNode(src, dest));
6084 6084 Node* bool_src_dest = _gvn.transform(new (C) BoolNode(cmp_src_dest, BoolTest::eq));
6085 6085 Node* src_dest_conjoint = generate_guard(bool_src_dest, NULL, PROB_MIN);
6086 6086 region->init_req(2, src_dest_conjoint);
6087 6087
6088 6088 record_for_igvn(region);
6089 6089 return _gvn.transform(region);
6090 6090 }
3477 lines elided