/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_GRAPHKIT_HPP
#define SHARE_VM_OPTO_GRAPHKIT_HPP

#include "ci/ciEnv.hpp"
#include "ci/ciMethodData.hpp"
#include "gc/shared/c2BarrierSetCodeGen.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/divnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
#include "runtime/deoptimization.hpp"

class FastLockNode;
class FastUnlockNode;
class IdealKit;
class LibraryCallKit;
class Parse;
class RootNode;

//-----------------------------------------------------------------------------
//----------------------------GraphKit-----------------------------------------
// Toolkit for building the common sorts of subgraphs.
// Does not know about bytecode parsing or type-flow results.
// It is able to create graphs implementing the semantics of most
// or all bytecodes, so that it can expand intrinsics and calls.
// It may depend on JVMState structure, but it must not depend
// on specific bytecode streams.
class GraphKit : public Phase {
  friend class PreserveJVMState;

 protected:
  ciEnv*            _env;       // Compilation environment
  PhaseGVN         &_gvn;       // Some optimizations while parsing
  SafePointNode*    _map;       // Parser map from JVM to Nodes
  SafePointNode*    _exceptions;// Parser map(s) for exception state(s)
  int               _bci;       // JVM Bytecode Pointer
  ciMethod*         _method;    // JVM Current Method

 private:
  int               _sp;        // JVM Expression Stack Pointer; don't modify directly!
 private:
  SafePointNode* map_not_null() const {
    assert(_map != NULL, "must call stopped() to test for reset compiler map");
    return _map;
  }

 public:
  GraphKit();                   // empty constructor
  GraphKit(JVMState* jvms);     // the JVM state on which to operate

#ifdef ASSERT
  ~GraphKit() {
    assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
  }
#endif

  virtual Parse*          is_Parse()          const { return NULL; }
  virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }

  ciEnv*    env() const { return _env; }
  PhaseGVN& gvn() const { return _gvn; }

  void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile

  // Handy well-known nodes:
  Node*     null() const { return zerocon(T_OBJECT); }
  Node*     top()  const { return C->top(); }
  RootNode* root() const { return C->root(); }

  // Create or find a constant node
  Node* intcon(jint con)       const { return _gvn.intcon(con); }
  Node* longcon(jlong con)     const { return _gvn.longcon(con); }
  Node* makecon(const Type *t) const { return _gvn.makecon(t); }
  Node* zerocon(BasicType bt)  const { return _gvn.zerocon(bt); }
  // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)

  jint find_int_con(Node* n, jint value_if_unknown) {
    return _gvn.find_int_con(n, value_if_unknown);
  }
  jlong find_long_con(Node* n, jlong value_if_unknown) {
    return _gvn.find_long_con(n, value_if_unknown);
  }
  // (See also macro find_intptr_t_con in type.hpp, which uses one of these.)

  // JVM State accessors:
  // Parser mapping from JVM indices into Nodes.
  // Low slots are accessed by the StartNode::enum.
  // Then come the locals at StartNode::Parms to StartNode::Parms+max_locals();
  // Then come JVM stack slots.
  // Finally come the monitors, if any.
  // See layout accessors in class JVMState.

  SafePointNode*  map()      const { return _map; }
  bool            has_exceptions() const { return _exceptions != NULL; }
  JVMState*       jvms()     const { return map_not_null()->_jvms; }
  int             sp()       const { return _sp; }
  int             bci()      const { return _bci; }
  Bytecodes::Code java_bc()  const;
  ciMethod*       method()   const { return _method; }

  void set_jvms(JVMState* jvms) { set_map(jvms->map());
                                  assert(jvms == this->jvms(), "sanity");
                                  _sp = jvms->sp();
                                  _bci = jvms->bci();
                                  _method = jvms->has_method() ? jvms->method() : NULL; }
  void set_map(SafePointNode* m) { _map = m; debug_only(verify_map()); }
  void set_sp(int sp) { assert(sp >= 0, "sp must be non-negative: %d", sp); _sp = sp; }
  void clean_stack(int from_sp);  // clear garbage beyond from_sp to top

  void inc_sp(int i)    { set_sp(sp() + i); }
  void dec_sp(int i)    { set_sp(sp() - i); }
  void set_bci(int bci) { _bci = bci; }

  // Make sure jvms has current bci & sp.
  JVMState* sync_jvms() const;
  JVMState* sync_jvms_for_reexecute();

#ifdef ASSERT
  // Make sure JVMS has an updated copy of bci and sp.
  // Also sanity-check method, depth, and monitor depth.
  bool jvms_in_sync() const;

  // Make sure the map looks OK.
  void verify_map() const;

  // Make sure a proposed exception state looks OK.
  static void verify_exception_state(SafePointNode* ex_map);
#endif

  // Clone the existing map state.  (Implements PreserveJVMState.)
  SafePointNode* clone_map();
  // Set the map to a clone of the given one.
  void set_map_clone(SafePointNode* m);

  // Tell if the compilation is failing.
  bool failing() const { return C->failing(); }

  // Set _map to NULL, signalling a stop to further bytecode execution.
  // Preserve the map intact for future use, and return it back to the caller.
  SafePointNode* stop() { SafePointNode* m = map(); set_map(NULL); return m; }

  // Stop, but first smash the map's inputs to NULL, to mark it dead.
  void stop_and_kill_map();

  // Tell if _map is NULL, or control is top.
  bool stopped();

  // Tell if this method or any caller method has exception handlers.
  bool has_ex_handler();

  // Save an exception without blowing stack contents or other JVM state.
  // (The extra pointer is stuck with add_req on the map, beyond the JVMS.)
  static void set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop);

  // Recover a saved exception from its map.
  static Node* saved_ex_oop(SafePointNode* ex_map);

  // Recover a saved exception from its map, and remove it from the map.
  static Node* clear_saved_ex_oop(SafePointNode* ex_map);

#ifdef ASSERT
  // Report whether a saved exception is present in the map.
  static bool has_saved_ex_oop(SafePointNode* ex_map);
#endif

  // Push an exception in the canonical position for handlers (stack(0)).
  void push_ex_oop(Node* ex_oop) {
    ensure_stack(1);  // ensure room to push the exception
    set_stack(0, ex_oop);
    set_sp(1);
    clean_stack(1);
  }

  // Detach and return an exception state.
  SafePointNode* pop_exception_state() {
    SafePointNode* ex_map = _exceptions;
    if (ex_map != NULL) {
      _exceptions = ex_map->next_exception();
      ex_map->set_next_exception(NULL);
      debug_only(verify_exception_state(ex_map));
    }
    return ex_map;
  }

  // Add an exception, using the given JVM state, without commoning.
  void push_exception_state(SafePointNode* ex_map) {
    debug_only(verify_exception_state(ex_map));
    ex_map->set_next_exception(_exceptions);
    _exceptions = ex_map;
  }

  // Turn the current JVM state into an exception state, appending the ex_oop.
  SafePointNode* make_exception_state(Node* ex_oop);

  // Add an exception, using the given JVM state.
  // Combine all exceptions with a common exception type into a single state.
  // (This is done via combine_exception_states.)
  void add_exception_state(SafePointNode* ex_map);

  // Combine all exceptions of any sort whatever into a single master state.
  SafePointNode* combine_and_pop_all_exception_states() {
    if (_exceptions == NULL) return NULL;
    SafePointNode* phi_map = pop_exception_state();
    SafePointNode* ex_map;
    while ((ex_map = pop_exception_state()) != NULL) {
      combine_exception_states(ex_map, phi_map);
    }
    return phi_map;
  }

  // Combine the two exception states, building phis as necessary.
  // The second argument is updated to include contributions from the first.
  void combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map);

  // Reset the map to the given state.  If there are any half-finished phis
  // in it (created by combine_exception_states), transform them now.
  // Returns the exception oop.  (Caller must call push_ex_oop if required.)
  Node* use_exception_state(SafePointNode* ex_map);
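
  // A typical way to drain all pending exception states (an illustrative
  // sketch only, not a prescribed pattern):
  //
  //   SafePointNode* ex_map = combine_and_pop_all_exception_states();
  //   if (ex_map != NULL) {
  //     Node* ex_oop = use_exception_state(ex_map);  // resets the map
  //     // ... route ex_oop to a handler, or rethrow it ...
  //   }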
  // Collect exceptions from a given JVM state into my exception list.
  void add_exception_states_from(JVMState* jvms);

  // Collect all raised exceptions into the current JVM state.
  // Clear the current exception list and map, and return the combined states.
  JVMState* transfer_exceptions_into_jvms();

  // Helper to throw a built-in exception.
  // Range checks take the offending index.
  // Cast and array store checks take the offending class.
  // Others do not take the optional argument.
  // The JVMS must allow the bytecode to be re-executed
  // via an uncommon trap.
  void builtin_throw(Deoptimization::DeoptReason reason, Node* arg = NULL);

  // Helper to check the JavaThread::_should_post_on_exceptions flag
  // and branch to an uncommon_trap if it is true (with the specified reason and must_throw)
  void uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
                                                  bool must_throw);

  // Helper functions for adding debug information
  void kill_dead_locals();
#ifdef ASSERT
  bool dead_locals_are_killed();
#endif
  // The call may deoptimize.  Supply required JVM state as debug info.
  // If must_throw is true, the call is guaranteed not to return normally.
  void add_safepoint_edges(SafePointNode* call,
                           bool must_throw = false);

  // How many stack inputs does the current BC consume?
  // And, how does the stack change after the bytecode?
  // Returns false if unknown.
  bool compute_stack_effects(int& inputs, int& depth);

  // Add a fixed offset to a pointer
  Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
    return basic_plus_adr(base, ptr, MakeConX(offset));
  }
  Node* basic_plus_adr(Node* base, intptr_t offset) {
    return basic_plus_adr(base, base, MakeConX(offset));
  }
  // Add a variable offset to a pointer
  Node* basic_plus_adr(Node* base, Node* offset) {
    return basic_plus_adr(base, base, offset);
  }
  Node* basic_plus_adr(Node* base, Node* ptr, Node* offset);

  // Some convenient shortcuts for common nodes
  Node* IfTrue(IfNode* iff)                   { return _gvn.transform(new IfTrueNode(iff));      }
  Node* IfFalse(IfNode* iff)                  { return _gvn.transform(new IfFalseNode(iff));     }

  Node* AddI(Node* l, Node* r)                { return _gvn.transform(new AddINode(l, r));       }
  Node* SubI(Node* l, Node* r)                { return _gvn.transform(new SubINode(l, r));       }
  Node* MulI(Node* l, Node* r)                { return _gvn.transform(new MulINode(l, r));       }
  Node* DivI(Node* ctl, Node* l, Node* r)     { return _gvn.transform(new DivINode(ctl, l, r));  }

  Node* AndI(Node* l, Node* r)                { return _gvn.transform(new AndINode(l, r));       }
  Node* OrI(Node* l, Node* r)                 { return _gvn.transform(new OrINode(l, r));        }
  Node* XorI(Node* l, Node* r)                { return _gvn.transform(new XorINode(l, r));       }

  Node* MaxI(Node* l, Node* r)                { return _gvn.transform(new MaxINode(l, r));       }
  Node* MinI(Node* l, Node* r)                { return _gvn.transform(new MinINode(l, r));       }

  Node* LShiftI(Node* l, Node* r)             { return _gvn.transform(new LShiftINode(l, r));    }
  Node* RShiftI(Node* l, Node* r)             { return _gvn.transform(new RShiftINode(l, r));    }
  Node* URShiftI(Node* l, Node* r)            { return _gvn.transform(new URShiftINode(l, r));   }

  Node* CmpI(Node* l, Node* r)                { return _gvn.transform(new CmpINode(l, r));       }
  Node* CmpL(Node* l, Node* r)                { return _gvn.transform(new CmpLNode(l, r));       }
  Node* CmpP(Node* l, Node* r)                { return _gvn.transform(new CmpPNode(l, r));       }
  Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new BoolNode(cmp, relop)); }

  Node* AddP(Node* b, Node* a, Node* o)       { return _gvn.transform(new AddPNode(b, a, o));    }
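
  // These helpers compose freely; each result is GVN-transformed on the
  // spot.  For instance (an illustrative sketch; 'index' and 'limit' are
  // hypothetical nodes):
  //
  //   Node* cmp = CmpI(index, limit);
  //   Node* tst = Bool(cmp, BoolTest::lt);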
  // Convert between int and long, and size_t.
  // (See macros ConvI2X, etc., in type.hpp.)
  Node* ConvI2L(Node* offset);
  Node* ConvI2UL(Node* offset);
  Node* ConvL2I(Node* offset);
  // Find out the klass of an object.
  Node* load_object_klass(Node* object);
  // Find out the length of an array.
  Node* load_array_length(Node* array);

  // Helper function to do a NULL pointer check or ZERO check based on type.
  // Throw an exception if a given value is null.
  // Return the value cast to not-null.
  // Be clever about equivalent dominating null checks.
  Node* null_check_common(Node* value, BasicType type,
                          bool assert_null = false,
                          Node* *null_control = NULL,
                          bool speculative = false);
  Node* null_check(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type, false, NULL, !_gvn.type(value)->speculative_maybe_null());
  }
  Node* null_check_receiver() {
    assert(argument(0)->bottom_type()->isa_ptr(), "must be");
    return null_check(argument(0));
  }
  Node* zero_check_int(Node* value) {
    assert(value->bottom_type()->basic_type() == T_INT,
           "wrong type: %s", type2name(value->bottom_type()->basic_type()));
    return null_check_common(value, T_INT);
  }
  Node* zero_check_long(Node* value) {
    assert(value->bottom_type()->basic_type() == T_LONG,
           "wrong type: %s", type2name(value->bottom_type()->basic_type()));
    return null_check_common(value, T_LONG);
  }
  // Throw an uncommon trap if a given value is __not__ null.
  // Return the value cast to null, and be clever about dominating checks.
  Node* null_assert(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type, true);
  }
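
  // Typical parser idiom (sketch): null-check the value on top of the stack
  // and continue with the not-null result; if the check cannot succeed, the
  // map is stopped and the caller should bail out.
  //
  //   Node* obj = null_check(peek());
  //   if (stopped())  return;   // this path was cut off by the check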
  // Null check oop.  Return null-path control into (*null_control).
  // Return a cast-not-null node which depends on the not-null control.
  // If never_see_null, use an uncommon trap (*null_control sees a top).
  // The cast is not valid along the null path; keep a copy of the original.
  // If safe_for_replace, then we can replace the value with the cast
  // in the parsing map (the cast is guaranteed to dominate the map).
  Node* null_check_oop(Node* value, Node* *null_control,
                       bool never_see_null = false,
                       bool safe_for_replace = false,
                       bool speculative = false);

  // Check the null_seen bit.
  bool seems_never_null(Node* obj, ciProfileData* data, bool& speculating);

  // Check for a unique receiver class at this call site.
  ciKlass* profile_has_unique_klass() {
    ciCallProfile profile = method()->call_profile_at_bci(bci());
    if (profile.count() >= 0 &&         // no cast failures here
        profile.has_receiver(0) &&
        profile.morphism() == 1) {
      return profile.receiver(0);
    }
    return NULL;
  }

  // Record type from profiling with the type system
  Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls, bool maybe_null);
  void record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc);
  void record_profiled_parameters_for_speculation();
  void record_profiled_return_for_speculation();
  Node* record_profiled_receiver_for_speculation(Node* n);

  // Use the type profile to narrow an object type.
  Node* maybe_cast_profiled_receiver(Node* not_null_obj,
                                     ciKlass* require_klass,
                                     ciKlass* spec,
                                     bool safe_for_replace);

  // Cast obj to type and emit guard unless we had too many traps here already
  Node* maybe_cast_profiled_obj(Node* obj,
                                ciKlass* type,
                                bool not_null = false);

  // Cast obj to not-null on this path
  Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
  // Replace all occurrences of one node by another.
  void replace_in_map(Node* old, Node* neww);

  void  push(Node* n)     { map_not_null();        _map->set_stack(_map->_jvms,   _sp++, n); }
  Node* pop()             { map_not_null(); return _map->stack(    _map->_jvms, --_sp);      }
  Node* peek(int off = 0) { map_not_null(); return _map->stack(    _map->_jvms,   _sp - off - 1); }

  void push_pair(Node* ldval) {
    push(ldval);
    push(top());  // the halfword is merely a placeholder
  }
  void push_pair_local(int i) {
    // longs are stored in locals in "push" order
    push(  local(i+0) );  // the real value
    assert(local(i+1) == top(), "");
    push(top());  // halfword placeholder
  }
  Node* pop_pair() {
    // the second half is pushed last & popped first; it contains exactly nothing
    Node* halfword = pop();
    assert(halfword == top(), "");
    // the long bits are pushed first & popped last:
    return pop();
  }
  void set_pair_local(int i, Node* lval) {
    // longs are stored in locals as a value/half pair (like doubles)
    set_local(i+0, lval);
    set_local(i+1, top());
  }

  // Push the node, which may be zero, one, or two words.
  void push_node(BasicType n_type, Node* n) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  push(n);       // T_INT, ...
    else if (n_size == 2)  push_pair(n);  // T_DOUBLE, T_LONG
    else                   { assert(n_size == 0, "must be T_VOID"); }
  }

  Node* pop_node(BasicType n_type) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  return pop();
    else if (n_size == 2)  return pop_pair();
    else                   return NULL;
  }
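
  // Longs and doubles occupy two JVM stack slots; the pair helpers above
  // keep the placeholder half consistent.  For example (sketch):
  //
  //   push_pair(longcon(0));   // pushes the value plus a top() placeholder
  //   Node* l = pop_pair();    // pops both halves and returns the value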
  Node* control()             const { return map_not_null()->control(); }
  Node* i_o()                 const { return map_not_null()->i_o(); }
  Node* returnadr()           const { return map_not_null()->returnadr(); }
  Node* frameptr()            const { return map_not_null()->frameptr(); }
  Node* local(uint idx)       const { map_not_null(); return _map->local(      _map->_jvms, idx); }
  Node* stack(uint idx)       const { map_not_null(); return _map->stack(      _map->_jvms, idx); }
  Node* argument(uint idx)    const { map_not_null(); return _map->argument(   _map->_jvms, idx); }
  Node* monitor_box(uint idx) const { map_not_null(); return _map->monitor_box(_map->_jvms, idx); }
  Node* monitor_obj(uint idx) const { map_not_null(); return _map->monitor_obj(_map->_jvms, idx); }

  void set_control  (Node* c)          { map_not_null()->set_control(c); }
  void set_i_o      (Node* c)          { map_not_null()->set_i_o(c); }
  void set_local(uint idx, Node* c)    { map_not_null(); _map->set_local(   _map->_jvms, idx, c); }
  void set_stack(uint idx, Node* c)    { map_not_null(); _map->set_stack(   _map->_jvms, idx, c); }
  void set_argument(uint idx, Node* c) { map_not_null(); _map->set_argument(_map->_jvms, idx, c); }
  void ensure_stack(uint stk_size)     { map_not_null(); _map->ensure_stack(_map->_jvms, stk_size); }

  // Access unaliased memory
  Node* memory(uint alias_idx);
  Node* memory(const TypePtr *tp) { return memory(C->get_alias_index(tp)); }
  Node* memory(Node* adr)         { return memory(_gvn.type(adr)->is_ptr()); }

  // Access immutable memory
  Node* immutable_memory() { return C->immutable_memory(); }

  // Set unaliased memory
  void set_memory(Node* c, uint alias_idx)    { merged_memory()->set_memory_at(alias_idx, c); }
  void set_memory(Node* c, const TypePtr *tp) { set_memory(c, C->get_alias_index(tp)); }
  void set_memory(Node* c, Node* adr)         { set_memory(c, _gvn.type(adr)->is_ptr()); }

  // Get the entire memory state (probably a MergeMemNode), and reset it
  // (The resetting prevents somebody from using the dangling Node pointer.)
  Node* reset_memory();

  // Get the entire memory state, asserted to be a MergeMemNode.
  MergeMemNode* merged_memory() {
    Node* mem = map_not_null()->memory();
    assert(mem->is_MergeMem(), "parse memory is always pre-split");
    return mem->as_MergeMem();
  }

  // Set the entire memory state; produce a new MergeMemNode.
  void set_all_memory(Node* newmem);

  // Create a memory projection from the call, then set_all_memory.
  void set_all_memory_call(Node* call, bool separate_io_proj = false);

  // Create a LoadNode, reading from the parser's memory state.
  // (Note: require_atomic_access is useful only with T_LONG.)
  //
  // We choose the unordered semantics by default because we have
  // adapted the `do_put_xxx' and `do_get_xxx' procedures for the case
  // of volatile fields.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false) {
    // This version computes alias_index from bottom_type
    return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
                     mo, control_dependency, require_atomic_access,
                     unaligned, mismatched);
  }
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other make_load factory");
    return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
                     mo, control_dependency, require_atomic_access,
                     unaligned, mismatched);
  }
  // This is the base version which is given an alias index.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false);
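
  // Example (sketch): an unordered int load through a field address, letting
  // the first overload derive the alias category from the address type;
  // 'obj' and 'field_offset' are hypothetical.
  //
  //   Node* adr = basic_plus_adr(obj, field_offset);
  //   Node* val = make_load(control(), adr, TypeInt::INT, T_INT, MemNode::unordered);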
  // Create & transform a StoreNode and store the effect into the
  // parser's memory state.
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization.  So the clients of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        const TypePtr* adr_type,
                        MemNode::MemOrd mo,
                        bool require_atomic_access = false,
                        bool unaligned = false,
                        bool mismatched = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other store_to_memory factory");
    return store_to_memory(ctl, adr, val, bt,
                           C->get_alias_index(adr_type),
                           mo, require_atomic_access,
                           unaligned, mismatched);
  }
  // This is the base version which is given an alias index.
  // Return the new StoreXNode.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        int adr_idx,
                        MemNode::MemOrd mo,
                        bool require_atomic_access = false,
                        bool unaligned = false,
                        bool mismatched = false);

  // Perform decorated accesses

  Node* access_store_at(Node* ctl,
                        Node* obj,   // containing obj
                        Node* adr,   // actual address to store val at
                        const TypePtr* adr_type,
                        Node* val,
                        const Type* val_type,
                        BasicType bt,
                        C2DecoratorSet decorators);

  Node* access_load_at(Node* obj,   // containing obj
                       Node* adr,   // actual address to load val from
                       const TypePtr* adr_type,
                       const Type* val_type,
                       BasicType bt,
                       C2DecoratorSet decorators);

  Node* access_cas_val_at(Node* ctl,
                          Node* obj,
                          Node* adr,
                          const TypePtr* adr_type,
                          int alias_idx,
                          Node* expected_val,
                          Node* new_val,
                          const Type* value_type,
                          BasicType bt,
                          C2DecoratorSet decorators);

  Node* access_cas_bool_at(Node* ctl,
                           Node* obj,
                           Node* adr,
                           const TypePtr* adr_type,
                           int alias_idx,
                           Node* expected_val,
                           Node* new_val,
                           const Type* value_type,
                           BasicType bt,
                           C2DecoratorSet decorators);

  Node* access_swap_at(Node* ctl,
                       Node* obj,
                       Node* adr,
                       const TypePtr* adr_type,
                       int alias_idx,
                       Node* new_val,
                       const Type* value_type,
                       BasicType bt,
                       C2DecoratorSet decorators);

  Node* access_fetch_and_add_at(Node* ctl,
                                Node* obj,
                                Node* adr,
                                const TypePtr* adr_type,
                                int alias_idx,
                                Node* new_val,
                                const Type* value_type,
                                BasicType bt,
                                C2DecoratorSet decorators);

  void access_clone(Node* ctl, Node* src, Node* dst, Node* size, bool is_array);

  // Return addressing for an array element.
  Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
                              // Optional constraint on the array size:
                              const TypeInt* sizetype = NULL,
                              // Optional control dependency (for example, on range check)
                              Node* ctrl = NULL);

  // Return a load of array element at idx.
  Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);

  //---------------- Dtrace support --------------------
  void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
  void make_dtrace_method_entry(ciMethod* method) {
    make_dtrace_method_entry_exit(method, true);
  }
  void make_dtrace_method_exit(ciMethod* method) {
    make_dtrace_method_entry_exit(method, false);
  }

  //--------------- stub generation -------------------
 public:
  void gen_stub(address C_function,
                const char *name,
                int is_fancy_jump,
                bool pass_tls,
                bool return_pc);

  //---------- help for generating calls --------------
  // Do a null check on the receiver as it would happen before the call to
  // callee (with all arguments still on the stack).
  Node* null_check_receiver_before_call(ciMethod* callee) {
    assert(!callee->is_static(), "must be a virtual method");
    // Callsite signature can be different from the actual method being called (i.e. _linkTo* sites).
    // Use the callsite signature always.
    ciMethod* declared_method = method()->get_method_at_bci(bci());
    const int nargs = declared_method->arg_size();
    inc_sp(nargs);
    Node* n = null_check_receiver();
    dec_sp(nargs);
    return n;
  }

  // Fill in argument edges for the call from argument(0), argument(1), ...
  // (The next step is to call set_edges_for_java_call.)
  void set_arguments_for_java_call(CallJavaNode* call);

  // Fill in non-argument edges for the call.
  // Transform the call, and update the basics: control, i_o, memory.
  // (The next step is usually to call set_results_for_java_call.)
  void set_edges_for_java_call(CallJavaNode* call,
                               bool must_throw = false, bool separate_io_proj = false);

  // Finish up a java call that was started by set_edges_for_java_call.
  // Call add_exception on any throw arising from the call.
  // Return the call result (transformed).
  Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false);

  // Similar to set_edges_for_java_call, but simplified for runtime calls.
  void set_predefined_output_for_runtime_call(Node* call) {
    set_predefined_output_for_runtime_call(call, NULL, NULL);
  }
  void set_predefined_output_for_runtime_call(Node* call,
                                              Node* keep_mem,
                                              const TypePtr* hook_mem);
  Node* set_predefined_input_for_runtime_call(SafePointNode* call);

  // Replace the call with the current state of the kit.  Requires
  // that the call was generated with separate io_projs so that
  // exceptional control flow can be handled properly.
  void replace_call(CallNode* call, Node* result, bool do_replaced_nodes = false);

  // Helper functions for statistics
  void increment_counter(address counter_addr);  // increment a debug counter
  void increment_counter(Node* counter_addr);    // increment a debug counter

  // Bail out to the interpreter right now.
  // The optional klass is the one causing the trap.
  // The optional reason is debug information written to the compile log.
  // Optional must_throw is the same as with add_safepoint_edges.
  void uncommon_trap(int trap_request,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false);

  // Shorthand, to avoid saying "Deoptimization::" so many times.
  void uncommon_trap(Deoptimization::DeoptReason reason,
                     Deoptimization::DeoptAction action,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false) {
    uncommon_trap(Deoptimization::make_trap_request(reason, action),
                  klass, reason_string, must_throw, keep_exact_action);
  }

  // Bail out to the interpreter and keep exact action (avoid switching to Action_none).
  void uncommon_trap_exact(Deoptimization::DeoptReason reason,
                           Deoptimization::DeoptAction action,
                           ciKlass* klass = NULL, const char* reason_string = NULL,
                           bool must_throw = false) {
    uncommon_trap(Deoptimization::make_trap_request(reason, action),
                  klass, reason_string, must_throw, /*keep_exact_action=*/true);
  }
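
  // For instance (sketch): deoptimize and force recompilation when a
  // speculative assumption made by an intrinsic fails:
  //
  //   uncommon_trap(Deoptimization::Reason_intrinsic,
  //                 Deoptimization::Action_make_not_entrant);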
  // SP when bytecode needs to be reexecuted.
  virtual int reexecute_sp() { return sp(); }

  // Report if there were too many traps at the current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(Deoptimization::DeoptReason reason) {
    return C->too_many_traps(method(), bci(), reason);
  }

  // Report if there were too many recompiles at the current method and bci.
  bool too_many_recompiles(Deoptimization::DeoptReason reason) {
    return C->too_many_recompiles(method(), bci(), reason);
  }

  // Returns the object (if any) which was created the moment before.
  Node* just_allocated_object(Node* current_control);

  // Sync Ideal and Graph kits.
  void sync_kit(IdealKit& ideal);
  void final_sync(IdealKit& ideal);

 public:
  // Helper function to round double arguments before a call
  void round_double_arguments(ciMethod* dest_method);
  void round_double_result(ciMethod* dest_method);

  // rounding for strict float precision conformance
  Node* precision_rounding(Node* n);

  // rounding for strict double precision conformance
  Node* dprecision_rounding(Node* n);

  // rounding for non-strict double stores
  Node* dstore_rounding(Node* n);

  // Helper functions for fast/slow path codes
  Node* opt_iff(Node* region, Node* iff);
  Node* make_runtime_call(int flags,
                          const TypeFunc* call_type, address call_addr,
                          const char* call_name,
                          const TypePtr* adr_type, // NULL if no memory effects
                          Node* parm0 = NULL, Node* parm1 = NULL,
                          Node* parm2 = NULL, Node* parm3 = NULL,
                          Node* parm4 = NULL, Node* parm5 = NULL,
                          Node* parm6 = NULL, Node* parm7 = NULL);
  enum {  // flag values for make_runtime_call
    RC_NO_FP = 1,        // CallLeafNoFPNode
    RC_NO_IO = 2,        // do not hook IO edges
    RC_NO_LEAF = 4,      // CallStaticJavaNode
    RC_MUST_THROW = 8,   // flag passed to add_safepoint_edges
    RC_NARROW_MEM = 16,  // input memory is same as output
    RC_UNCOMMON = 32,    // freq. expected to be like uncommon trap
    RC_LEAF = 0          // null value: no flags set
  };
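
  // Example (sketch): a leaf runtime call with no IO edges; the TypeFunc
  // factory and entry point shown are hypothetical placeholders.
  //
  //   Node* call = make_runtime_call(RC_LEAF | RC_NO_IO,
  //                                  OptoRuntime::helper_Type(),  // hypothetical
  //                                  helper_entry_point,          // hypothetical
  //                                  "helper",
  //                                  TypePtr::BOTTOM);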
  // Merge in all memory slices from new_mem, along the given path.
  void merge_memory(Node* new_mem, Node* region, int new_path);
  void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj, bool deoptimize = false);

  // Helper functions to build synchronizations
  int next_monitor();
  Node* insert_mem_bar(int opcode, Node* precedent = NULL);
  Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = NULL);
  // Optional 'precedent' is appended as an extra edge, to force ordering.
  FastLockNode* shared_lock(Node* obj);
  void shared_unlock(Node* box, Node* obj);

  // Helper function for the fast path/slow path idioms
  Node* fast_and_slow(Node* in, const Type *result_type, Node* null_result, IfNode* fast_test, Node* fast_result, address slow_call, const TypeFunc *slow_call_type, Node* slow_arg, Klass* ex_klass, Node* slow_result);

  // Generate an instance-of idiom.  Used by both the instance-of bytecode
  // and the reflective instance-of call.
  Node* gen_instanceof(Node *subobj, Node* superkls, bool safe_for_replace = false);

  // Generate a check-cast idiom.  Used by both the check-cast bytecode
  // and the array-store bytecode.
  Node* gen_checkcast(Node *subobj, Node* superkls,
                      Node* *failure_control = NULL);

  Node* gen_subtype_check(Node* subklass, Node* superklass) {
    MergeMemNode* mem = merged_memory();
    Node* ctrl = control();
    Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, &_gvn);
    set_control(ctrl);
    return n;
  }

  // Exact type check used for predicted calls and casts.
  // Rewrites (*casted_receiver) to be casted to the stronger type.
  // (Caller is responsible for doing replace_in_map.)
  Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                            Node* *casted_receiver);

  // Implementation of object creation
  Node* set_output_for_allocation(AllocateNode* alloc,
                                  const TypeOopPtr* oop_type,
                                  bool deoptimize_on_exception = false);
  Node* get_layout_helper(Node* klass_node, jint& constant_value);
  Node* new_instance(Node* klass_node,
                     Node* slow_test = NULL,
                     Node* *return_size_val = NULL,
                     bool deoptimize_on_exception = false);
  Node* new_array(Node* klass_node, Node* count_val, int nargs,
                  Node* *return_size_val = NULL,
                  bool deoptimize_on_exception = false);

  // java.lang.String helpers
  Node* load_String_length(Node* ctrl, Node* str);
  Node* load_String_value(Node* ctrl, Node* str);
  Node* load_String_coder(Node* ctrl, Node* str);
  void store_String_value(Node* ctrl, Node* str, Node* value);
  void store_String_coder(Node* ctrl, Node* str, Node* value);
  Node* capture_memory(const TypePtr* src_type, const TypePtr* dst_type);
  Node* compress_string(Node* src, const TypeAryPtr* src_type, Node* dst, Node* count);
  void inflate_string(Node* src, Node* dst, const TypeAryPtr* dst_type, Node* count);
  void inflate_string_slow(Node* src, Node* dst, Node* start, Node* count);

  // Handy for making control flow
  IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new IfNode(ctrl, tst, prob, cnt);  // New IfNode's
    _gvn.set_type(iff, iff->Value(&_gvn));           // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);       // Range-check and Null-check removal is later
    return iff;
  }

  IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new IfNode(ctrl, tst, prob, cnt);  // New IfNode's
    _gvn.transform(iff);                             // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);       // Range-check and Null-check removal is later
    return iff;
  }
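
  // Sketch of a two-way branch built from these helpers ('tst' is a
  // hypothetical BoolNode; the two arms would typically be merged back
  // together with a RegionNode):
  //
  //   IfNode* iff       = create_and_xform_if(control(), tst, PROB_FAIR, COUNT_UNKNOWN);
  //   Node*   taken     = IfTrue(iff);
  //   Node*   not_taken = IfFalse(iff);
  //   set_control(taken);   // continue building along the taken arm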
  // Insert a loop predicate into the graph
  void add_predicate(int nargs = 0);
  void add_predicate_impl(Deoptimization::DeoptReason reason, int nargs);

  Node* make_constant_from_field(ciField* field, Node* obj);

  // Produce a new array node of stable type
  Node* cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type);
};

// Helper class to support building of control flow branches. Upon
// creation the map and sp at bci are cloned and restored upon
// destruction. Typical use:
//
// { PreserveJVMState pjvms(this);
//   // code of new branch
// }
// // here the JVM state at bci is established

class PreserveJVMState: public StackObj {
 protected:
  GraphKit*      _kit;
#ifdef ASSERT
  int            _block;  // PO of current block, if a Parse
  int            _bci;
#endif
  SafePointNode* _map;
  uint           _sp;

 public:
  PreserveJVMState(GraphKit* kit, bool clone_map = true);
  ~PreserveJVMState();
};

// Helper class to build cutouts of the form if (p) ; else {x...}.
// The code {x...} must not fall through.
// The kit's main flow of control is set to the "then" continuation of if(p).
class BuildCutout: public PreserveJVMState {
 public:
  BuildCutout(GraphKit* kit, Node* p, float prob, float cnt = COUNT_UNKNOWN);
  ~BuildCutout();
};

// Helper class to preserve the original _reexecute bit and _sp and restore
// them back
class PreserveReexecuteState: public StackObj {
 protected:
  GraphKit*                _kit;
  uint                     _sp;
  JVMState::ReexecuteState _reexecute;

 public:
  PreserveReexecuteState(GraphKit* kit);
  ~PreserveReexecuteState();
};

#endif // SHARE_VM_OPTO_GRAPHKIT_HPP