/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_GRAPHKIT_HPP
#define SHARE_VM_OPTO_GRAPHKIT_HPP

#include "ci/ciEnv.hpp"
#include "ci/ciMethodData.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/divnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
#include "runtime/deoptimization.hpp"

class FastLockNode;
class FastUnlockNode;
class IdealKit;
class LibraryCallKit;
class Parse;
class RootNode;

//-----------------------------------------------------------------------------
//----------------------------GraphKit-----------------------------------------
// Toolkit for building the common sorts of subgraphs.
// Does not know about bytecode parsing or type-flow results.
// It is able to create graphs implementing the semantics of most
// or all bytecodes, so that it can expand intrinsics and calls.
// It may depend on JVMState structure, but it must not depend
// on specific bytecode streams.
class GraphKit : public Phase {
  friend class PreserveJVMState;

 protected:
  ciEnv*            _env;        // Compilation environment
  PhaseGVN         &_gvn;        // Some optimizations while parsing
  SafePointNode*    _map;        // Parser map from JVM to Nodes
  SafePointNode*    _exceptions; // Parser map(s) for exception state(s)
  int               _bci;        // JVM Bytecode Pointer
  ciMethod*         _method;     // JVM Current Method

 private:
  int               _sp;         // JVM Expression Stack Pointer; don't modify directly!

 private:
  SafePointNode* map_not_null() const {
    assert(_map != NULL, "must call stopped() to test for reset compiler map");
    return _map;
  }

 public:
  GraphKit();                   // empty constructor
  GraphKit(JVMState* jvms);     // the JVM state on which to operate

#ifdef ASSERT
  ~GraphKit() {
    assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
  }
#endif

  virtual Parse*          is_Parse()          const { return NULL; }
  virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }

  ciEnv*    env() const { return _env; }
  PhaseGVN& gvn() const { return _gvn; }

  void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile

  // Handy well-known nodes:
  Node*     null() const { return zerocon(T_OBJECT); }
  Node*     top()  const { return C->top(); }
  RootNode* root() const { return C->root(); }

  // Create or find a constant node
  Node* intcon(jint con)       const { return _gvn.intcon(con); }
  Node* longcon(jlong con)     const { return _gvn.longcon(con); }
  Node* makecon(const Type *t) const { return _gvn.makecon(t); }
  Node* zerocon(BasicType bt)  const { return _gvn.zerocon(bt); }
  // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)

  // Helper for byte_map_base
  Node* byte_map_base_node() {
    // Get base of card map
    CardTableModRefBS* ct = (CardTableModRefBS*)(Universe::heap()->barrier_set());
    assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust users of this code");
    if (ct->byte_map_base != NULL) {
      return makecon(TypeRawPtr::make((address)ct->byte_map_base));
    } else {
      return null();
    }
  }

  jint find_int_con(Node* n, jint value_if_unknown) {
    return _gvn.find_int_con(n, value_if_unknown);
  }
  jlong find_long_con(Node* n, jlong value_if_unknown) {
    return _gvn.find_long_con(n, value_if_unknown);
  }
  // (See also macro find_intptr_t_con in type.hpp, which uses one of these.)

  // JVM State accessors:
  // Parser mapping from JVM indices into Nodes.
  // Low slots are accessed by the StartNode::enum.
  // Then come the locals at StartNode::Parms to StartNode::Parms+max_locals();
  // Then come JVM stack slots.
  // Finally come the monitors, if any.
  // See layout accessors in class JVMState.

  SafePointNode*  map()  const { return _map; }
  bool            has_exceptions() const { return _exceptions != NULL; }
  JVMState*       jvms() const { return map_not_null()->_jvms; }
  int             sp()   const { return _sp; }
  int             bci()  const { return _bci; }
  Bytecodes::Code java_bc() const;
  ciMethod*       method() const { return _method; }

  void set_jvms(JVMState* jvms) { set_map(jvms->map());
                                  assert(jvms == this->jvms(), "sanity");
                                  _sp = jvms->sp();
                                  _bci = jvms->bci();
                                  _method = jvms->has_method() ? jvms->method() : NULL; }
  void set_map(SafePointNode* m) { _map = m; debug_only(verify_map()); }
  void set_sp(int sp)            { assert(sp >= 0, err_msg_res("sp must be non-negative: %d", sp)); _sp = sp; }
  void clean_stack(int from_sp); // clear garbage beyond from_sp to top

  void inc_sp(int i)    { set_sp(sp() + i); }
  void dec_sp(int i)    { set_sp(sp() - i); }
  void set_bci(int bci) { _bci = bci; }

  // Make sure jvms has current bci & sp.
  JVMState* sync_jvms() const;
  JVMState* sync_jvms_for_reexecute();

#ifdef ASSERT
  // Make sure JVMS has an updated copy of bci and sp.
  // Also sanity-check method, depth, and monitor depth.
  bool jvms_in_sync() const;

  // Make sure the map looks OK.
  void verify_map() const;

  // Make sure a proposed exception state looks OK.
  static void verify_exception_state(SafePointNode* ex_map);
#endif

  // Clone the existing map state.  (Implements PreserveJVMState.)
  SafePointNode* clone_map();

  // Set the map to a clone of the given one.
  void set_map_clone(SafePointNode* m);

  // Tell if the compilation is failing.
  bool failing() const { return C->failing(); }

  // Set _map to NULL, signalling a stop to further bytecode execution.
  // Preserve the map intact for future use, and return it back to the caller.
  SafePointNode* stop() { SafePointNode* m = map(); set_map(NULL); return m; }

  // Stop, but first smash the map's inputs to NULL, to mark it dead.
  void stop_and_kill_map();

  // Tell if _map is NULL, or control is top.
  bool stopped();

  // Tell if this method or any caller method has exception handlers.
  bool has_ex_handler();

  // Save an exception without blowing stack contents or other JVM state.
  // (The extra pointer is stuck with add_req on the map, beyond the JVMS.)
  static void set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop);

  // Recover a saved exception from its map.
  static Node* saved_ex_oop(SafePointNode* ex_map);

  // Recover a saved exception from its map, and remove it from the map.
  static Node* clear_saved_ex_oop(SafePointNode* ex_map);

#ifdef ASSERT
  // Report whether there is a saved exception on the given map.
  static bool has_saved_ex_oop(SafePointNode* ex_map);
#endif

  // Push an exception in the canonical position for handlers (stack(0)).
  void push_ex_oop(Node* ex_oop) {
    ensure_stack(1);  // ensure room to push the exception
    set_stack(0, ex_oop);
    set_sp(1);
    clean_stack(1);
  }

  // Detach and return an exception state.
  SafePointNode* pop_exception_state() {
    SafePointNode* ex_map = _exceptions;
    if (ex_map != NULL) {
      _exceptions = ex_map->next_exception();
      ex_map->set_next_exception(NULL);
      debug_only(verify_exception_state(ex_map));
    }
    return ex_map;
  }

  // Add an exception, using the given JVM state, without commoning.
  void push_exception_state(SafePointNode* ex_map) {
    debug_only(verify_exception_state(ex_map));
    ex_map->set_next_exception(_exceptions);
    _exceptions = ex_map;
  }

  // Turn the current JVM state into an exception state, appending the ex_oop.
  SafePointNode* make_exception_state(Node* ex_oop);

  // Add an exception, using the given JVM state.
  // Combine all exceptions with a common exception type into a single state.
  // (This is done via combine_exception_states.)
  void add_exception_state(SafePointNode* ex_map);

  // Combine all exceptions of any sort whatever into a single master state.
  SafePointNode* combine_and_pop_all_exception_states() {
    if (_exceptions == NULL)  return NULL;
    SafePointNode* phi_map = pop_exception_state();
    SafePointNode* ex_map;
    while ((ex_map = pop_exception_state()) != NULL) {
      combine_exception_states(ex_map, phi_map);
    }
    return phi_map;
  }
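
  // Typical use of the exception list (an illustrative sketch only; 'kit'
  // is a hypothetical GraphKit* holding pending exception states):
  //
  //   SafePointNode* ex_map;
  //   while ((ex_map = kit->pop_exception_state()) != NULL) {
  //     Node* ex_oop = kit->use_exception_state(ex_map);
  //     // ... wire ex_oop and the restored map into a handler or rethrow ...
  //   }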

  // Combine the two exception states, building phis as necessary.
  // The second argument is updated to include contributions from the first.
  void combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map);

  // Reset the map to the given state.  If there are any half-finished phis
  // in it (created by combine_exception_states), transform them now.
  // Returns the exception oop.  (Caller must call push_ex_oop if required.)
  Node* use_exception_state(SafePointNode* ex_map);

  // Collect exceptions from a given JVM state into my exception list.
  void add_exception_states_from(JVMState* jvms);

  // Collect all raised exceptions into the current JVM state.
  // Clear the current exception list and map, returns the combined states.
  JVMState* transfer_exceptions_into_jvms();

  // Helper to throw a built-in exception.
  // Range checks take the offending index.
  // Cast and array store checks take the offending class.
  // Others do not take the optional argument.
  // The JVMS must allow the bytecode to be re-executed
  // via an uncommon trap.
  void builtin_throw(Deoptimization::DeoptReason reason, Node* arg = NULL);

  // Helper to check the JavaThread::_should_post_on_exceptions flag
  // and branch to an uncommon_trap if it is true (with the specified reason and must_throw).
  void uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
                                                  bool must_throw);

  // Helper Functions for adding debug information
  void kill_dead_locals();
#ifdef ASSERT
  bool dead_locals_are_killed();
#endif
  // The call may deoptimize.  Supply required JVM state as debug info.
  // If must_throw is true, the call is guaranteed not to return normally.
  void add_safepoint_edges(SafePointNode* call,
                           bool must_throw = false);
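
  // Illustrative sketch of builtin_throw (hypothetical parsing context,
  // with 'idx' the index node that failed a range check):
  //
  //   builtin_throw(Deoptimization::Reason_range_check, idx);
  //   // this path typically becomes dead; callers test stopped() afterwards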

  // How many stack inputs does the current BC consume?
  // And, how does the stack change after the bytecode?
  // Returns false if unknown.
  bool compute_stack_effects(int& inputs, int& depth);

  // Add a fixed offset to a pointer
  Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
    return basic_plus_adr(base, ptr, MakeConX(offset));
  }
  Node* basic_plus_adr(Node* base, intptr_t offset) {
    return basic_plus_adr(base, base, MakeConX(offset));
  }
  // Add a variable offset to a pointer
  Node* basic_plus_adr(Node* base, Node* offset) {
    return basic_plus_adr(base, base, offset);
  }
  Node* basic_plus_adr(Node* base, Node* ptr, Node* offset);

  // Some convenient shortcuts for common nodes
  Node* IfTrue(IfNode* iff)                   { return _gvn.transform(new (C) IfTrueNode(iff));      }
  Node* IfFalse(IfNode* iff)                  { return _gvn.transform(new (C) IfFalseNode(iff));     }

  Node* AddI(Node* l, Node* r)                { return _gvn.transform(new (C) AddINode(l, r));       }
  Node* SubI(Node* l, Node* r)                { return _gvn.transform(new (C) SubINode(l, r));       }
  Node* MulI(Node* l, Node* r)                { return _gvn.transform(new (C) MulINode(l, r));       }
  Node* DivI(Node* ctl, Node* l, Node* r)     { return _gvn.transform(new (C) DivINode(ctl, l, r));  }

  Node* AndI(Node* l, Node* r)                { return _gvn.transform(new (C) AndINode(l, r));       }
  Node* OrI(Node* l, Node* r)                 { return _gvn.transform(new (C) OrINode(l, r));        }
  Node* XorI(Node* l, Node* r)                { return _gvn.transform(new (C) XorINode(l, r));       }

  Node* MaxI(Node* l, Node* r)                { return _gvn.transform(new (C) MaxINode(l, r));       }
  Node* MinI(Node* l, Node* r)                { return _gvn.transform(new (C) MinINode(l, r));       }

  Node* LShiftI(Node* l, Node* r)             { return _gvn.transform(new (C) LShiftINode(l, r));    }
  Node* RShiftI(Node* l, Node* r)             { return _gvn.transform(new (C) RShiftINode(l, r));    }
  Node* URShiftI(Node* l, Node* r)            { return _gvn.transform(new (C) URShiftINode(l, r));   }

  Node* CmpI(Node* l, Node* r)                { return _gvn.transform(new (C) CmpINode(l, r));       }
  Node* CmpL(Node* l, Node* r)                { return _gvn.transform(new (C) CmpLNode(l, r));       }
  Node* CmpP(Node* l, Node* r)                { return _gvn.transform(new (C) CmpPNode(l, r));       }
  Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new (C) BoolNode(cmp, relop)); }

  Node* AddP(Node* b, Node* a, Node* o)       { return _gvn.transform(new (C) AddPNode(b, a, o));    }

  // Convert between int and long, and size_t.
  // (See macros ConvI2X, etc., in type.hpp.)
  Node* ConvI2L(Node* offset);
  Node* ConvL2I(Node* offset);
  // Find out the klass of an object.
  Node* load_object_klass(Node* object);
  // Find out the length of an array.
  Node* load_array_length(Node* array);
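
  // Illustrative sketch (hypothetical nodes 'ary' and 'idx'): composing the
  // shortcuts above into the test idx < ary.length:
  //
  //   Node* len = load_array_length(ary);
  //   Node* tst = Bool(CmpI(idx, len), BoolTest::lt);
  //   // 'tst' can then feed create_and_map_if (declared below)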

  // Helper function to do a NULL pointer check or ZERO check based on type.
  // Throw an exception if a given value is null.
  // Return the value cast to not-null.
  // Be clever about equivalent dominating null checks.
  Node* null_check_common(Node* value, BasicType type,
                          bool assert_null = false, Node* *null_control = NULL);
  Node* null_check(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type);
  }
  Node* null_check_receiver() {
    assert(argument(0)->bottom_type()->isa_ptr(), "must be");
    return null_check(argument(0));
  }
  Node* zero_check_int(Node* value) {
    assert(value->bottom_type()->basic_type() == T_INT,
           err_msg_res("wrong type: %s", type2name(value->bottom_type()->basic_type())));
    return null_check_common(value, T_INT);
  }
  Node* zero_check_long(Node* value) {
    assert(value->bottom_type()->basic_type() == T_LONG,
           err_msg_res("wrong type: %s", type2name(value->bottom_type()->basic_type())));
    return null_check_common(value, T_LONG);
  }
  // Throw an uncommon trap if a given value is __not__ null.
  // Return the value cast to null, and be clever about dominating checks.
  Node* null_assert(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type, true);
  }

  // Null check oop.  Return null-path control into (*null_control).
  // Return a cast-not-null node which depends on the not-null control.
  // If never_see_null, use an uncommon trap (*null_control sees a top).
  // The cast is not valid along the null path; keep a copy of the original.
  // If safe_for_replace, then we can replace the value with the cast
  // in the parsing map (the cast is guaranteed to dominate the map).
  Node* null_check_oop(Node* value, Node* *null_control,
                       bool never_see_null = false, bool safe_for_replace = false);

  // Check the null_seen bit.
  bool seems_never_null(Node* obj, ciProfileData* data);

  // At a call, if profiling tells us there's a unique klass for the
  // receiver, return it.
  ciKlass* profile_has_unique_klass() {
    ciCallProfile profile = method()->call_profile_at_bci(bci());
    if (profile.count() >= 0 &&         // no cast failures here
        profile.has_receiver(0) &&
        profile.morphism() == 1) {
      return profile.receiver(0);
    }
    return NULL;
  }

  // Record profiling data exact_kls for Node n with the type system
  // so that it can propagate it (speculation)
  Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls);
  // Record profiling data from receiver profiling at an invoke with
  // the type system so that it can propagate it (speculation)
  Node* record_profiled_receiver_for_speculation(Node* n);
  // Record profiling data from argument profiling at an invoke with
  // the type system so that it can propagate it (speculation)
  void record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc);
  // Record profiling data from parameter profiling with the type
  // system so that it can propagate it (speculation)
  void record_profiled_parameters_for_speculation();

  // Use the type profile to narrow an object type.
  Node* maybe_cast_profiled_receiver(Node* not_null_obj,
                                     ciKlass* require_klass,
                                     ciKlass* spec,
                                     bool safe_for_replace);

  // Cast obj to type and emit guard unless we had too many traps here already
  Node* maybe_cast_profiled_obj(Node* obj,
                                ciKlass* type,
                                bool not_null = false);

  // Cast obj to not-null on this path
  Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
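
  // Illustrative sketch (hypothetical 'obj' taken from the stack): the
  // common null-check-then-use idiom during parsing:
  //
  //   Node* obj = pop();
  //   obj = null_check(obj);   // emits the check; throws on the null path
  //   if (stopped())  return;  // the path was provably null
  //   // ... use obj, now cast to not-null ...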

  // Replace all occurrences of one node by another.
  void replace_in_map(Node* old, Node* neww);

  void  push(Node* n)     { map_not_null();        _map->set_stack(_map->_jvms,   _sp++, n); }
  Node* pop()             { map_not_null(); return _map->stack(    _map->_jvms, --_sp); }
  Node* peek(int off = 0) { map_not_null(); return _map->stack(    _map->_jvms,   _sp - off - 1); }

  void push_pair(Node* ldval) {
    push(ldval);
    push(top());  // the halfword is merely a placeholder
  }
  void push_pair_local(int i) {
    // longs are stored in locals in "push" order
    push(local(i+0));  // the real value
    assert(local(i+1) == top(), "");
    push(top());  // halfword placeholder
  }
  Node* pop_pair() {
    // the second half is pushed last & popped first; it contains exactly nothing
    Node* halfword = pop();
    assert(halfword == top(), "");
    // the long bits are pushed first & popped last:
    return pop();
  }
  void set_pair_local(int i, Node* lval) {
    // longs are stored in locals as a value/half pair (like doubles)
    set_local(i+0, lval);
    set_local(i+1, top());
  }

  // Push the node, which may be zero, one, or two words.
  void push_node(BasicType n_type, Node* n) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  push(n);       // T_INT, ...
    else if (n_size == 2)  push_pair(n);  // T_DOUBLE, T_LONG
    else                   { assert(n_size == 0, "must be T_VOID"); }
  }

  Node* pop_node(BasicType n_type) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  return pop();
    else if (n_size == 2)  return pop_pair();
    else                   return NULL;
  }

  Node* control()             const { return map_not_null()->control(); }
  Node* i_o()                 const { return map_not_null()->i_o(); }
  Node* returnadr()           const { return map_not_null()->returnadr(); }
  Node* frameptr()            const { return map_not_null()->frameptr(); }
  Node* local(uint idx)       const { map_not_null(); return _map->local(      _map->_jvms, idx); }
  Node* stack(uint idx)       const { map_not_null(); return _map->stack(      _map->_jvms, idx); }
  Node* argument(uint idx)    const { map_not_null(); return _map->argument(   _map->_jvms, idx); }
  Node* monitor_box(uint idx) const { map_not_null(); return _map->monitor_box(_map->_jvms, idx); }
  Node* monitor_obj(uint idx) const { map_not_null(); return _map->monitor_obj(_map->_jvms, idx); }

  void set_control(Node* c)            { map_not_null()->set_control(c); }
  void set_i_o(Node* c)                { map_not_null()->set_i_o(c); }
  void set_local(uint idx, Node* c)    { map_not_null(); _map->set_local(   _map->_jvms, idx, c); }
  void set_stack(uint idx, Node* c)    { map_not_null(); _map->set_stack(   _map->_jvms, idx, c); }
  void set_argument(uint idx, Node* c) { map_not_null(); _map->set_argument(_map->_jvms, idx, c); }
  void ensure_stack(uint stk_size)     { map_not_null(); _map->ensure_stack(_map->_jvms, stk_size); }

  // Access unaliased memory
  Node* memory(uint alias_idx);
  Node* memory(const TypePtr *tp) { return memory(C->get_alias_index(tp)); }
  Node* memory(Node* adr)         { return memory(_gvn.type(adr)->is_ptr()); }

  // Access immutable memory
  Node* immutable_memory() { return C->immutable_memory(); }

  // Set unaliased memory
  void set_memory(Node* c, uint alias_idx)    { merged_memory()->set_memory_at(alias_idx, c); }
  void set_memory(Node* c, const TypePtr *tp) { set_memory(c, C->get_alias_index(tp)); }
  void set_memory(Node* c, Node* adr)         { set_memory(c, _gvn.type(adr)->is_ptr()); }
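
  // Illustrative sketch: two-word values occupy a value/placeholder pair on
  // the expression stack, so a long addition during parsing looks like:
  //
  //   Node* b = pop_pair();
  //   Node* a = pop_pair();
  //   push_pair(_gvn.transform(new (C) AddLNode(a, b)));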

  // Get the entire memory state (probably a MergeMemNode), and reset it.
  // (The resetting prevents somebody from using the dangling Node pointer.)
  Node* reset_memory();

  // Get the entire memory state, asserted to be a MergeMemNode.
  MergeMemNode* merged_memory() {
    Node* mem = map_not_null()->memory();
    assert(mem->is_MergeMem(), "parse memory is always pre-split");
    return mem->as_MergeMem();
  }

  // Set the entire memory state; produce a new MergeMemNode.
  void set_all_memory(Node* newmem);

  // Create a memory projection from the call, then set_all_memory.
  void set_all_memory_call(Node* call, bool separate_io_proj = false);

  // Create a LoadNode, reading from the parser's memory state.
  // (Note:  require_atomic_access is useful only with T_LONG.)
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                  bool require_atomic_access = false) {
    // This version computes alias_index from bottom_type
    return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
                     require_atomic_access);
  }
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
                  bool require_atomic_access = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other make_load factory");
    return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
                     require_atomic_access);
  }
  // This is the base version which is given an alias index.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
                  bool require_atomic_access = false);

  // Create & transform a StoreNode and store the effect into the
  // parser's memory state.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        const TypePtr* adr_type,
                        bool require_atomic_access = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other store_to_memory factory");
    return store_to_memory(ctl, adr, val, bt,
                           C->get_alias_index(adr_type),
                           require_atomic_access);
  }
  // This is the base version which is given an alias index.
  // Return the new StoreXNode.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        int adr_idx,
                        bool require_atomic_access = false);
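
  // Illustrative sketch (hypothetical int field of 'obj' at a known offset):
  // a load/store pair through the parser's memory state:
  //
  //   Node* adr = basic_plus_adr(obj, offset);
  //   const TypePtr* adr_type = _gvn.type(adr)->is_ptr();
  //   Node* val = make_load(control(), adr, TypeInt::INT, T_INT, adr_type);
  //   store_to_memory(control(), adr, intcon(0), T_INT, adr_type);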

  // All-in-one pre-barrier, store, post-barrier.
  // Insert a write-barrier'd store.  This is to let generational GC
  // work; we have to flag all oop-stores before the next GC point.
  //
  // It comes in 3 flavors of store to an object, array, or unknown.
  // We use precise card marks for arrays to avoid scanning the entire
  // array.  We use imprecise for object.  We use precise for unknown
  // since we don't know if we have an array or an object or even
  // where the object starts.
  //
  // If val==NULL, it is taken to be a completely unknown value.

  Node* store_oop(Node* ctl,
                  Node* obj,   // containing obj
                  Node* adr,   // actual address to store val at
                  const TypePtr* adr_type,
                  Node* val,
                  const TypeOopPtr* val_type,
                  BasicType bt,
                  bool use_precise);

  Node* store_oop_to_object(Node* ctl,
                            Node* obj,   // containing obj
                            Node* adr,   // actual address to store val at
                            const TypePtr* adr_type,
                            Node* val,
                            const TypeOopPtr* val_type,
                            BasicType bt) {
    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false);
  }

  Node* store_oop_to_array(Node* ctl,
                           Node* obj,   // containing obj
                           Node* adr,   // actual address to store val at
                           const TypePtr* adr_type,
                           Node* val,
                           const TypeOopPtr* val_type,
                           BasicType bt) {
    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true);
  }

  // Could be an array or object we don't know at compile time (unsafe ref.)
  Node* store_oop_to_unknown(Node* ctl,
                             Node* obj,   // containing obj
                             Node* adr,   // actual address to store val at
                             const TypePtr* adr_type,
                             Node* val,
                             BasicType bt);

  // For the few cases where the barriers need special help
  void pre_barrier(bool do_load, Node* ctl,
                   Node* obj, Node* adr, uint adr_idx, Node* val, const TypeOopPtr* val_type,
                   Node* pre_val,
                   BasicType bt);

  void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
                    Node* val, BasicType bt, bool use_precise);

  // Return addressing for an array element.
  Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
                              // Optional constraint on the array size:
                              const TypeInt* sizetype = NULL);

  // Return a load of array element at idx.
  Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);

  //---------------- Dtrace support --------------------
  void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
  void make_dtrace_method_entry(ciMethod* method) {
    make_dtrace_method_entry_exit(method, true);
  }
  void make_dtrace_method_exit(ciMethod* method) {
    make_dtrace_method_entry_exit(method, false);
  }

  //--------------- stub generation -------------------
 public:
  void gen_stub(address C_function,
                const char *name,
                int is_fancy_jump,
                bool pass_tls,
                bool return_pc);

  //---------- help for generating calls --------------

  // Do a null check on the receiver as it would happen before the call to
  // callee (with all arguments still on the stack).
  Node* null_check_receiver_before_call(ciMethod* callee) {
    assert(!callee->is_static(), "must be a virtual method");
    const int nargs = callee->arg_size();
    inc_sp(nargs);
    Node* n = null_check_receiver();
    dec_sp(nargs);
    return n;
  }

  // Fill in argument edges for the call from argument(0), argument(1), ...
  // (The next step is to call set_edges_for_java_call.)
  void set_arguments_for_java_call(CallJavaNode* call);

  // Fill in non-argument edges for the call.
  // Transform the call, and update the basics: control, i_o, memory.
  // (The next step is usually to call set_results_for_java_call.)
  void set_edges_for_java_call(CallJavaNode* call,
                               bool must_throw = false, bool separate_io_proj = false);
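
  // Illustrative sketch (hypothetical CallJavaNode* 'call'): the usual
  // three-step sequence for emitting a Java call:
  //
  //   set_arguments_for_java_call(call);  // wire up argument edges
  //   set_edges_for_java_call(call);      // control, i_o, memory; transform
  //   Node* result = set_results_for_java_call(call);  // declared below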

  // Finish up a java call that was started by set_edges_for_java_call.
  // Call add_exception on any throw arising from the call.
  // Return the call result (transformed).
  Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false);

  // Similar to set_edges_for_java_call, but simplified for runtime calls.
  void set_predefined_output_for_runtime_call(Node* call) {
    set_predefined_output_for_runtime_call(call, NULL, NULL);
  }
  void set_predefined_output_for_runtime_call(Node* call,
                                              Node* keep_mem,
                                              const TypePtr* hook_mem);
  Node* set_predefined_input_for_runtime_call(SafePointNode* call);

  // Replace the call with the current state of the kit.  Requires
  // that the call was generated with separate io_projs so that
  // exceptional control flow can be handled properly.
  void replace_call(CallNode* call, Node* result);

  // helper functions for statistics
  void increment_counter(address counter_addr);  // increment a debug counter
  void increment_counter(Node* counter_addr);    // increment a debug counter

  // Bail out to the interpreter right now.
  // The optional klass is the one causing the trap.
  // The optional reason is debug information written to the compile log.
  // Optional must_throw is the same as with add_safepoint_edges.
  void uncommon_trap(int trap_request,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false);

  // Shorthand, to avoid saying "Deoptimization::" so many times.
  void uncommon_trap(Deoptimization::DeoptReason reason,
                     Deoptimization::DeoptAction action,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false) {
    uncommon_trap(Deoptimization::make_trap_request(reason, action),
                  klass, reason_string, must_throw, keep_exact_action);
  }

  // SP when bytecode needs to be reexecuted.
  virtual int reexecute_sp() { return sp(); }

  // Report if there were too many traps at the current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(Deoptimization::DeoptReason reason) {
    return C->too_many_traps(method(), bci(), reason);
  }

  // Report if there were too many recompiles at the current method and bci.
  bool too_many_recompiles(Deoptimization::DeoptReason reason) {
    return C->too_many_recompiles(method(), bci(), reason);
  }

  // Returns the object (if any) which was created the moment before.
  Node* just_allocated_object(Node* current_control);

  static bool use_ReduceInitialCardMarks() {
    return (ReduceInitialCardMarks
            && Universe::heap()->can_elide_tlab_store_barriers());
  }

  // Sync Ideal and Graph kits.
  void sync_kit(IdealKit& ideal);
  void final_sync(IdealKit& ideal);

  // vanilla/CMS post barrier
  void write_barrier_post(Node *store, Node* obj,
                          Node* adr, uint adr_idx, Node* val, bool use_precise);
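
  // Illustrative sketch: the usual guard against trapping repeatedly at the
  // same bci before emitting a speculative check (see uncommon_trap above):
  //
  //   if (!too_many_traps(Deoptimization::Reason_class_check)) {
  //     uncommon_trap(Deoptimization::Reason_class_check,
  //                   Deoptimization::Action_maybe_recompile);
  //   }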

  // Allow reordering of pre-barrier with oop store and/or post-barrier.
  // Used for load_store operations which load the old value.
  bool can_move_pre_barrier() const;

  // G1 pre/post barriers
  void g1_write_barrier_pre(bool do_load,
                            Node* obj,
                            Node* adr,
                            uint alias_idx,
                            Node* val,
                            const TypeOopPtr* val_type,
                            Node* pre_val,
                            BasicType bt);

  void g1_write_barrier_post(Node* store,
                             Node* obj,
                             Node* adr,
                             uint alias_idx,
                             Node* val,
                             BasicType bt,
                             bool use_precise);
  // Helper function for g1
 private:
  void g1_mark_card(IdealKit& ideal, Node* card_adr, Node* store, uint oop_alias_idx,
                    Node* index, Node* index_adr,
                    Node* buffer, const TypeFunc* tf);

 public:
  // Helper functions to round double arguments/results before a call
  void round_double_arguments(ciMethod* dest_method);
  void round_double_result(ciMethod* dest_method);

  // rounding for strict float precision conformance
  Node* precision_rounding(Node* n);

  // rounding for strict double precision conformance
  Node* dprecision_rounding(Node* n);

  // rounding for non-strict double stores
  Node* dstore_rounding(Node* n);

  // Helper functions for fast/slow path codes
  Node* opt_iff(Node* region, Node* iff);
  Node* make_runtime_call(int flags,
                          const TypeFunc* call_type, address call_addr,
                          const char* call_name,
                          const TypePtr* adr_type, // NULL if no memory effects
                          Node* parm0 = NULL, Node* parm1 = NULL,
                          Node* parm2 = NULL, Node* parm3 = NULL,
                          Node* parm4 = NULL, Node* parm5 = NULL,
                          Node* parm6 = NULL, Node* parm7 = NULL);
  enum {  // flag values for make_runtime_call
    RC_NO_FP = 1,       // CallLeafNoFPNode
    RC_NO_IO = 2,       // do not hook IO edges
    RC_NO_LEAF = 4,     // CallStaticJavaNode
    RC_MUST_THROW = 8,  // flag passed to add_safepoint_edges
    RC_NARROW_MEM = 16, // input memory is same as output
    RC_UNCOMMON = 32,   // freq. expected to be like uncommon trap
    RC_LEAF = 0         // null value: no flags set
  };

  // merge in all memory slices from new_mem, along the given path
  void merge_memory(Node* new_mem, Node* region, int new_path);
  void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj);

  // Helper functions to build synchronizations
  int next_monitor();
  Node* insert_mem_bar(int opcode, Node* precedent = NULL);
  Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = NULL);
  // Optional 'precedent' is appended as an extra edge, to force ordering.
  FastLockNode* shared_lock(Node* obj);
  void shared_unlock(Node* box, Node* obj);

  // helper functions for the fast path/slow path idioms
  Node* fast_and_slow(Node* in, const Type *result_type, Node* null_result,
                      IfNode* fast_test, Node* fast_result,
                      address slow_call, const TypeFunc *slow_call_type,
                      Node* slow_arg, Klass* ex_klass, Node* slow_result);

  // Generate an instance-of idiom.  Used by both the instance-of bytecode
  // and the reflective instance-of call.
  Node* gen_instanceof(Node *subobj, Node* superkls, bool safe_for_replace = false);

  // Generate a check-cast idiom.  Used by both the check-cast bytecode
  // and the array-store bytecode.
  Node* gen_checkcast(Node *subobj, Node* superkls,
                      Node* *failure_control = NULL);
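
  // Illustrative sketch (hypothetical not-null 'obj' and a ciKlass*
  // 'target_klass'): the parser's typical use of the check-cast idiom:
  //
  //   Node* kls = makecon(TypeKlassPtr::make(target_klass));
  //   Node* casted = gen_checkcast(obj, kls);
  //   if (stopped())  return;  // the cast cannot succeed on this path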

  // Generate a subtyping check.  Takes as input the subtype and supertype.
  // Returns 2 values: sets the default control() to the true path and
  // returns the false path.  Only reads from constant memory taken from the
  // default memory; does not write anything.  It also doesn't take in an
  // Object; if you wish to check an Object you need to load the Object's
  // class prior to coming here.
  Node* gen_subtype_check(Node* subklass, Node* superklass);

  // Static parse-time type checking logic for gen_subtype_check:
  enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
  int static_subtype_check(ciKlass* superk, ciKlass* subk);

  // Exact type check used for predicted calls and casts.
  // Rewrites (*casted_receiver) to be casted to the stronger type.
  // (Caller is responsible for doing replace_in_map.)
  Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                            Node* *casted_receiver);

  // implementation of object creation
  Node* set_output_for_allocation(AllocateNode* alloc,
                                  const TypeOopPtr* oop_type);
  Node* get_layout_helper(Node* klass_node, jint& constant_value);
  Node* new_instance(Node* klass_node,
                     Node* slow_test = NULL,
                     Node* *return_size_val = NULL);
  Node* new_array(Node* klass_node, Node* count_val, int nargs,
                  Node* *return_size_val = NULL);

  // java.lang.String helpers
  Node* load_String_offset(Node* ctrl, Node* str);
  Node* load_String_length(Node* ctrl, Node* str);
  Node* load_String_value(Node* ctrl, Node* str);
  void store_String_offset(Node* ctrl, Node* str, Node* value);
  void store_String_length(Node* ctrl, Node* str, Node* value);
  void store_String_value(Node* ctrl, Node* str, Node* value);

  // Handy for making control flow
  IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new (C) IfNode(ctrl, tst, prob, cnt); // New IfNode's
    _gvn.set_type(iff, iff->Value(&_gvn));              // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);          // Range-check and Null-check removal is later
    return iff;
  }

  IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new (C) IfNode(ctrl, tst, prob, cnt); // New IfNode's
    _gvn.transform(iff);                                // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);          // Range-check and Null-check removal is later
    return iff;
  }

  // Insert a loop predicate into the graph
  void add_predicate(int nargs = 0);
  void add_predicate_impl(Deoptimization::DeoptReason reason, int nargs);

  // Produce new array node of stable type
  Node* cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type);
};

// Helper class to support building of control flow branches. Upon
// creation the map and sp at bci are cloned and restored upon
// destruction. Typical use:
//
// { PreserveJVMState pjvms(this);
//   // code of new branch
// }
// // here the JVM state at bci is established

class PreserveJVMState: public StackObj {
 protected:
  GraphKit*      _kit;
#ifdef ASSERT
  int            _block;  // PO of current block, if a Parse
  int            _bci;
#endif
  SafePointNode* _map;
  uint           _sp;

 public:
  PreserveJVMState(GraphKit* kit, bool clone_map = true);
  ~PreserveJVMState();
};
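
// Illustrative sketch (inside a hypothetical GraphKit method, with a
// boolean test node 'tst'): building both arms of a branch with the
// helpers above:
//
//   IfNode* iff = create_and_map_if(control(), tst, PROB_FAIR, COUNT_UNKNOWN);
//   Node* then_ctl = IfTrue(iff);
//   Node* else_ctl = IfFalse(iff);
//   { PreserveJVMState pjvms(this);
//     set_control(else_ctl);
//     // ... emit the else branch ...
//   }
//   set_control(then_ctl);  // continue with the then branch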

// Helper class to build cutouts of the form if (p) ; else {x...}.
// The code {x...} must not fall through.
// The kit's main flow of control is set to the "then" continuation of if(p).
class BuildCutout: public PreserveJVMState {
 public:
  BuildCutout(GraphKit* kit, Node* p, float prob, float cnt = COUNT_UNKNOWN);
  ~BuildCutout();
};

// Helper class to preserve the original _reexecute bit and _sp and restore
// them back
class PreserveReexecuteState: public StackObj {
 protected:
  GraphKit*                _kit;
  uint                     _sp;
  JVMState::ReexecuteState _reexecute;

 public:
  PreserveReexecuteState(GraphKit* kit);
  ~PreserveReexecuteState();
};

#endif // SHARE_VM_OPTO_GRAPHKIT_HPP