#ifdef USE_PRAGMA_IDENT_HDR
#pragma ident "@(#)graphKit.hpp 1.59 07/08/07 15:24:25 JVM"
#endif
/*
 * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

class FastLockNode;
class FastUnlockNode;
class Parse;
class RootNode;

//-----------------------------------------------------------------------------
//----------------------------GraphKit-----------------------------------------
// Toolkit for building the common sorts of subgraphs.
// Does not know about bytecode parsing or type-flow results.
// It is able to create graphs implementing the semantics of most
// or all bytecodes, so that it can expand intrinsics and calls.
// It may depend on JVMState structure, but it must not depend
// on specific bytecode streams.
class GraphKit : public Phase {
  friend class PreserveJVMState;

 protected:
  ciEnv*            _env;       // Compilation environment
  PhaseGVN         &_gvn;       // Some optimizations while parsing
  SafePointNode*    _map;       // Parser map from JVM to Nodes
  SafePointNode*    _exceptions;// Parser map(s) for exception state(s)
  int               _sp;        // JVM Expression Stack Pointer
  int               _bci;       // JVM Bytecode Pointer
  ciMethod*         _method;    // JVM Current Method

 private:
  SafePointNode* map_not_null() const {
    assert(_map != NULL, "must call stopped() to test for reset compiler map");
    return _map;
  }

 public:
  GraphKit();                   // empty constructor
  GraphKit(JVMState* jvms);     // the JVM state on which to operate

#ifdef ASSERT
  ~GraphKit() {
    assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
  }
#endif

  virtual Parse* is_Parse() const { return NULL; }

  ciEnv*    env() const { return _env; }
  PhaseGVN& gvn() const { return _gvn; }

  void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile

  // Handy well-known nodes:
  Node*     null() const { return zerocon(T_OBJECT); }
  Node*     top()  const { return C->top(); }
  RootNode* root() const { return C->root(); }

  // Create or find a constant node
  Node* intcon(jint con)       const { return _gvn.intcon(con); }
  Node* longcon(jlong con)     const { return _gvn.longcon(con); }
  Node* makecon(const Type *t) const { return _gvn.makecon(t); }
  Node* zerocon(BasicType bt)  const { return _gvn.zerocon(bt); }
  // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)
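
  // For illustration, a client typically materializes constants through the
  // factories above rather than building ConNodes by hand (a sketch only;
  // the local names are invented here):
  //
  //   Node* zero = intcon(0);      // shared ConINode for 0
  //   Node* one  = longcon(1);     // shared ConLNode for 1L
  //   Node* oop0 = null();         // same node as zerocon(T_OBJECT)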

  jint find_int_con(Node* n, jint value_if_unknown) {
    return _gvn.find_int_con(n, value_if_unknown);
  }
  jlong find_long_con(Node* n, jlong value_if_unknown) {
    return _gvn.find_long_con(n, value_if_unknown);
  }
  // (See also macro find_intptr_t_con in type.hpp, which uses one of these.)

  // JVM State accessors:
  // Parser mapping from JVM indices into Nodes.
  // Low slots are accessed by the StartNode::enum.
  // Then come the locals at StartNode::Parms to StartNode::Parms+max_locals();
  // Then come JVM stack slots.
  // Finally come the monitors, if any.
  // See layout accessors in class JVMState.

  SafePointNode*  map()            const { return _map; }
  bool            has_exceptions() const { return _exceptions != NULL; }
  JVMState*       jvms()           const { return map_not_null()->_jvms; }
  int             sp()             const { return _sp; }
  int             bci()            const { return _bci; }
  Bytecodes::Code java_bc()        const;
  ciMethod*       method()         const { return _method; }

  void set_jvms(JVMState* jvms) { set_map(jvms->map());
                                  assert(jvms == this->jvms(), "sanity");
                                  _sp = jvms->sp();
                                  _bci = jvms->bci();
                                  _method = jvms->has_method() ? jvms->method() : NULL; }
  void set_map(SafePointNode* m)  { _map = m; debug_only(verify_map()); }
  void set_sp(int i)              { assert(i >= 0, "must be non-negative"); _sp = i; }
  void clean_stack(int from_sp);  // clear garbage beyond from_sp to top

  void inc_sp(int i)              { set_sp(sp() + i); }
  void set_bci(int bci)           { _bci = bci; }

  // Make sure jvms has current bci & sp.
  JVMState* sync_jvms() const;
#ifdef ASSERT
  // Make sure JVMS has an updated copy of bci and sp.
  // Also sanity-check method, depth, and monitor depth.
  bool jvms_in_sync() const;

  // Make sure the map looks OK.
  void verify_map() const;

  // Make sure a proposed exception state looks OK.
  static void verify_exception_state(SafePointNode* ex_map);
#endif

  // Clone the existing map state.  (Implements PreserveJVMState.)
  SafePointNode* clone_map();

  // Set the map to a clone of the given one.
  void set_map_clone(SafePointNode* m);

  // Tell if the compilation is failing.
  bool failing() const { return C->failing(); }

  // Set _map to NULL, signalling a stop to further bytecode execution.
  // Preserve the map intact for future use, and return it back to the caller.
  SafePointNode* stop() { SafePointNode* m = map(); set_map(NULL); return m; }

  // Stop, but first smash the map's inputs to NULL, to mark it dead.
  void stop_and_kill_map();

  // Tell if _map is NULL, or control is top.
  bool stopped();

  // Tell if this method or any caller method has exception handlers.
  bool has_ex_handler();

  // Save an exception without blowing stack contents or other JVM state.
  // (The extra pointer is stuck with add_req on the map, beyond the JVMS.)
  static void set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop);

  // Recover a saved exception from its map.
  static Node* saved_ex_oop(SafePointNode* ex_map);

  // Recover a saved exception from its map, and remove it from the map.
  static Node* clear_saved_ex_oop(SafePointNode* ex_map);

#ifdef ASSERT
  // Tell whether there is a saved exception in the given map.
  static bool has_saved_ex_oop(SafePointNode* ex_map);
#endif
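
  // Illustrative sketch of the saved-exception protocol (the names here are
  // invented, not part of this header): a snapshot of the JVM state carries
  // the thrown oop until a handler is ready for it:
  //
  //   SafePointNode* ex_map = clone_map();          // snapshot the JVM state
  //   set_saved_ex_oop(ex_map, ex_oop);             // stash the thrown oop
  //   ...
  //   Node* thrown = clear_saved_ex_oop(ex_map);    // later: recover, unstash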

  // Push an exception in the canonical position for handlers (stack(0)).
  void push_ex_oop(Node* ex_oop) {
    ensure_stack(1);  // ensure room to push the exception
    set_stack(0, ex_oop);
    set_sp(1);
    clean_stack(1);
  }

  // Detach and return an exception state.
  SafePointNode* pop_exception_state() {
    SafePointNode* ex_map = _exceptions;
    if (ex_map != NULL) {
      _exceptions = ex_map->next_exception();
      ex_map->set_next_exception(NULL);
      debug_only(verify_exception_state(ex_map));
    }
    return ex_map;
  }

  // Add an exception, using the given JVM state, without commoning.
  void push_exception_state(SafePointNode* ex_map) {
    debug_only(verify_exception_state(ex_map));
    ex_map->set_next_exception(_exceptions);
    _exceptions = ex_map;
  }

  // Turn the current JVM state into an exception state, appending the ex_oop.
  SafePointNode* make_exception_state(Node* ex_oop);

  // Add an exception, using the given JVM state.
  // Combine all exceptions with a common exception type into a single state.
  // (This is done via combine_exception_states.)
  void add_exception_state(SafePointNode* ex_map);

  // Combine all exceptions of any sort whatever into a single master state.
  SafePointNode* combine_and_pop_all_exception_states() {
    if (_exceptions == NULL)  return NULL;
    SafePointNode* phi_map = pop_exception_state();
    SafePointNode* ex_map;
    while ((ex_map = pop_exception_state()) != NULL) {
      combine_exception_states(ex_map, phi_map);
    }
    return phi_map;
  }

  // Combine the two exception states, building phis as necessary.
  // The second argument is updated to include contributions from the first.
  void combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map);

  // Reset the map to the given state.  If there are any half-finished phis
  // in it (created by combine_exception_states), transform them now.
  // Returns the exception oop.  (Caller must call push_ex_oop if required.)
  Node* use_exception_state(SafePointNode* ex_map);

  // Collect exceptions from a given JVM state into my exception list.
  void add_exception_states_from(JVMState* jvms);

  // Collect all raised exceptions into the current JVM state.
  // Clears the current exception list and map, and returns the combined states.
  JVMState* transfer_exceptions_into_jvms();

  // Helper to throw a built-in exception.
  // Range checks take the offending index.
  // Cast and array store checks take the offending class.
  // Others do not take the optional argument.
  // The JVMS must allow the bytecode to be re-executed
  // via an uncommon trap.
  void builtin_throw(Deoptimization::DeoptReason reason, Node* arg = NULL);

  // Helper Functions for adding debug information
  void kill_dead_locals();
#ifdef ASSERT
  bool dead_locals_are_killed();
#endif
  // The call may deoptimize.  Supply required JVM state as debug info.
  // If must_throw is true, the call is guaranteed not to return normally.
  void add_safepoint_edges(SafePointNode* call,
                           bool must_throw = false);
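
  // For illustration ('idx' stands for the offending index node), a failed
  // range check is typically emitted as
  //
  //   builtin_throw(Deoptimization::Reason_range_check, idx);
  //
  // which throws the corresponding built-in exception, or deoptimizes via
  // an uncommon trap.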

  // How many stack inputs does the current BC consume?
  // And, how does the stack change after the bytecode?
  // Returns false if unknown.
  bool compute_stack_effects(int& inputs, int& depth);

  // Add a fixed offset to a pointer
  Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
    return basic_plus_adr(base, ptr, MakeConX(offset));
  }
  Node* basic_plus_adr(Node* base, intptr_t offset) {
    return basic_plus_adr(base, base, MakeConX(offset));
  }
  // Add a variable offset to a pointer
  Node* basic_plus_adr(Node* base, Node* offset) {
    return basic_plus_adr(base, base, offset);
  }
  Node* basic_plus_adr(Node* base, Node* ptr, Node* offset);

  // Convert between int and long, and size_t.
  // (See also macros ConvI2X, etc., in type.hpp.)
  Node* ConvI2L(Node* offset);
  Node* ConvL2I(Node* offset);
  // Find out the klass of an object.
  Node* load_object_klass(Node* object);
  // Find out the length of an array.
  Node* load_array_length(Node* array);
  // Helper function to do a NULL pointer check or ZERO check based on type.
  Node* null_check_common(Node* value, BasicType type,
                          bool assert_null, Node* *null_control);
  // Throw an exception if a given value is null.
  // Return the value cast to not-null.
  // Be clever about equivalent dominating null checks.
  Node* do_null_check(Node* value, BasicType type) {
    return null_check_common(value, type, false, NULL);
  }
  // Throw an uncommon trap if a given value is __not__ null.
  // Return the value cast to null, and be clever about dominating checks.
  Node* do_null_assert(Node* value, BasicType type) {
    return null_check_common(value, type, true, NULL);
  }
  // Null check oop.  Return null-path control into (*null_control).
  // Return a cast-not-null node which depends on the not-null control.
  // If never_see_null, use an uncommon trap (*null_control sees a top).
  // The cast is not valid along the null path; keep a copy of the original.
  Node* null_check_oop(Node* value, Node* *null_control,
                       bool never_see_null = false);

  // Cast obj to not-null on this path
  Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
  // Replace all occurrences of one node by another.
  void replace_in_map(Node* old, Node* neww);

  void  push(Node* n)     { map_not_null(); _map->set_stack(_map->_jvms, _sp++, n); }
  Node* pop()             { map_not_null(); return _map->stack(_map->_jvms, --_sp); }
  Node* peek(int off = 0) { map_not_null(); return _map->stack(_map->_jvms, _sp - off - 1); }

  void push_pair(Node* ldval) {
    push(ldval);
    push(top());  // the halfword is merely a placeholder
  }
  void push_pair_local(int i) {
    // longs are stored in locals in "push" order
    push(  local(i+0) );  // the real value
    assert(local(i+1) == top(), "");
    push(top());  // halfword placeholder
  }
  Node* pop_pair() {
    // the second half is pushed last & popped first; it contains exactly nothing
    Node* halfword = pop();
    assert(halfword == top(), "");
    // the long bits are pushed first & popped last:
    return pop();
  }
  void set_pair_local(int i, Node* lval) {
    // longs are stored in locals as a value/half pair (like doubles)
    set_local(i+0, lval);
    set_local(i+1, top());
  }
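
  // Illustrative sketch of two-slot handling: a long on the JVM stack is
  // popped and re-pushed as a (value, top()) pair, e.g.
  //
  //   Node* l   = pop_pair();     // also discards the top() placeholder
  //   Node* sum = _gvn.transform(new (C, 3) AddLNode(l, longcon(1)));
  //   push_pair(sum);             // pushes the value plus its top() half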

  // Push the node, which may be zero, one, or two words.
  void push_node(BasicType n_type, Node* n) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  push(      n );  // T_INT, ...
    else if (n_size == 2)  push_pair( n );  // T_DOUBLE, T_LONG
    else { assert(n_size == 0, "must be T_VOID"); }
  }

  Node* pop_node(BasicType n_type) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  return pop();
    else if (n_size == 2)  return pop_pair();
    else                   return NULL;
  }

  Node* control()             const { return map_not_null()->control(); }
  Node* i_o()                 const { return map_not_null()->i_o(); }
  Node* returnadr()           const { return map_not_null()->returnadr(); }
  Node* frameptr()            const { return map_not_null()->frameptr(); }
  Node* local(uint idx)       const { map_not_null(); return _map->local(      _map->_jvms, idx); }
  Node* stack(uint idx)       const { map_not_null(); return _map->stack(      _map->_jvms, idx); }
  Node* argument(uint idx)    const { map_not_null(); return _map->argument(   _map->_jvms, idx); }
  Node* monitor_box(uint idx) const { map_not_null(); return _map->monitor_box(_map->_jvms, idx); }
  Node* monitor_obj(uint idx) const { map_not_null(); return _map->monitor_obj(_map->_jvms, idx); }

  void set_control  (Node* c)          { map_not_null()->set_control(c); }
  void set_i_o      (Node* c)          { map_not_null()->set_i_o(c); }
  void set_local(uint idx, Node* c)    { map_not_null(); _map->set_local(   _map->_jvms, idx, c); }
  void set_stack(uint idx, Node* c)    { map_not_null(); _map->set_stack(   _map->_jvms, idx, c); }
  void set_argument(uint idx, Node* c) { map_not_null(); _map->set_argument(_map->_jvms, idx, c); }
  void ensure_stack(uint stk_size)     { map_not_null(); _map->ensure_stack(_map->_jvms, stk_size); }

  // Access unaliased memory
  Node* memory(uint alias_idx);
  Node* memory(const TypePtr *tp) { return memory(C->get_alias_index(tp)); }
  Node* memory(Node* adr)         { return memory(_gvn.type(adr)->is_ptr()); }

  // Access immutable memory
  Node* immutable_memory() { return C->immutable_memory(); }

  // Set unaliased memory
  void set_memory(Node* c, uint alias_idx)    { merged_memory()->set_memory_at(alias_idx, c); }
  void set_memory(Node* c, const TypePtr *tp) { set_memory(c, C->get_alias_index(tp)); }
  void set_memory(Node* c, Node* adr)         { set_memory(c, _gvn.type(adr)->is_ptr()); }

  // Get the entire memory state (probably a MergeMemNode), and reset it
  // (The resetting prevents somebody from using the dangling Node pointer.)
  Node* reset_memory();

  // Get the entire memory state, asserted to be a MergeMemNode.
  MergeMemNode* merged_memory() {
    Node* mem = map_not_null()->memory();
    assert(mem->is_MergeMem(), "parse memory is always pre-split");
    return mem->as_MergeMem();
  }

  // Set the entire memory state; produce a new MergeMemNode.
  void set_all_memory(Node* newmem);

  // Create a memory projection from the call, then set_all_memory.
  void set_all_memory_call(Node* call);
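
  // Memory is tracked one alias class (slice) at a time; for illustration
  // ('adr' and 'val' are invented names), a store such as
  //
  //   Node* st = store_to_memory(control(), adr, val, T_BYTE,
  //                              TypeRawPtr::BOTTOM);
  //
  // reads just its own slice via memory(), appends the StoreNode, and
  // installs the new state via set_memory().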

  // Create a LoadNode, reading from the parser's memory state.
  // (Note:  require_atomic_access is useful only with T_LONG.)
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                  bool require_atomic_access = false) {
    // This version computes alias_index from bottom_type
    return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
                     require_atomic_access);
  }
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                  const TypePtr* adr_type,
                  bool require_atomic_access = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other make_load factory");
    return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
                     require_atomic_access);
  }
  // This is the base version which is given an alias index.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                  int adr_idx, bool require_atomic_access = false);

  // Create & transform a StoreNode and store the effect into the
  // parser's memory state.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        const TypePtr* adr_type,
                        bool require_atomic_access = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other store_to_memory factory");
    return store_to_memory(ctl, adr, val, bt,
                           C->get_alias_index(adr_type),
                           require_atomic_access);
  }
  // This is the base version which is given an alias index.
  // Return the new StoreXNode.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        int adr_idx,
                        bool require_atomic_access = false);

  // All-in-one pre-barrier, store, post-barrier.
  // Insert a write-barrier'd store.  This is to let generational GC
  // work; we have to flag all oop-stores before the next GC point.
  //
  // It comes in 3 flavors of store to an object, array, or unknown.
  // We use precise card marks for arrays to avoid scanning the entire
  // array.  We use imprecise for object.  We use precise for unknown
  // since we don't know if we have an array or an object or even
  // where the object starts.
  //
  // If val==NULL, it is taken to be a completely unknown value.  QQQ

  Node* store_oop_to_object(Node* ctl,
                            Node* obj,   // containing obj
                            Node* adr,   // actual address to store val at
                            const TypePtr* adr_type,
                            Node* val,
                            const Type* val_type,
                            BasicType bt);

  Node* store_oop_to_array(Node* ctl,
                           Node* obj,   // containing obj
                           Node* adr,   // actual address to store val at
                           const TypePtr* adr_type,
                           Node* val,
                           const Type* val_type,
                           BasicType bt);

  // Could be an array or object we don't know at compile time (unsafe ref.)
  Node* store_oop_to_unknown(Node* ctl,
                             Node* obj,   // containing obj
                             Node* adr,   // actual address to store val at
                             const TypePtr* adr_type,
                             Node* val,
                             const Type* val_type,
                             BasicType bt);

  // For the few cases where the barriers need special help
  void pre_barrier(Node* ctl, Node* obj, Node* adr, uint adr_idx,
                   Node* val, const Type* val_type, BasicType bt);

  void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
                    Node* val, BasicType bt, bool use_precise);

  // Return addressing for an array element.
  Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
                              // Optional constraint on the array size:
                              const TypeInt* sizetype = NULL);
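
  // E.g., a load of a[i] from an int[] might be built as (a sketch; 'ary'
  // and 'idx' are invented names):
  //
  //   Node* adr = array_element_address(ary, idx, T_INT);
  //   Node* elt = make_load(control(), adr, TypeInt::INT, T_INT,
  //                         TypeAryPtr::INTS);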

  // Return a load of array element at idx.
  Node* load_array_element(Node* ctl, Node* ary, Node* idx,
                           const TypeAryPtr* arytype);

  // CMS card-marks have an input from the corresponding oop_store
  void cms_card_mark(Node* ctl, Node* adr, Node* val, Node* oop_store);

  //---------------- Dtrace support --------------------
  void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
  void make_dtrace_method_entry(ciMethod* method) {
    make_dtrace_method_entry_exit(method, true);
  }
  void make_dtrace_method_exit(ciMethod* method) {
    make_dtrace_method_entry_exit(method, false);
  }

  //--------------- stub generation -------------------
 public:
  void gen_stub(address C_function,
                const char *name,
                int is_fancy_jump,
                bool pass_tls,
                bool return_pc);

  //---------- help for generating calls --------------

  // Do a null check on the receiver, which is in argument(0).
  Node* null_check_receiver(ciMethod* callee) {
    assert(!callee->is_static(), "must be a virtual method");
    int nargs = 1 + callee->signature()->size();
    // Null check on self without removing any arguments.  The argument
    // null check technically happens in the wrong place, which can lead to
    // invalid stack traces when the primitive is inlined into a method
    // which handles NullPointerExceptions.
    Node* receiver = argument(0);
    _sp += nargs;
    receiver = do_null_check(receiver, T_OBJECT);
    _sp -= nargs;
    return receiver;
  }

  // Fill in argument edges for the call from argument(0), argument(1), ...
  // (The next step is to call set_edges_for_java_call.)
  void set_arguments_for_java_call(CallJavaNode* call);

  // Fill in non-argument edges for the call.
  // Transform the call, and update the basics: control, i_o, memory.
  // (The next step is usually to call set_results_for_java_call.)
  void set_edges_for_java_call(CallJavaNode* call,
                               bool must_throw = false);

  // Finish up a java call that was started by set_edges_for_java_call.
  // Call add_exception on any throw arising from the call.
  // Return the call result (transformed).
  Node* set_results_for_java_call(CallJavaNode* call);

  // Similar to set_edges_for_java_call, but simplified for runtime calls.
  void set_predefined_output_for_runtime_call(Node* call) {
    set_predefined_output_for_runtime_call(call, NULL, NULL);
  }
  void set_predefined_output_for_runtime_call(Node* call,
                                              Node* keep_mem,
                                              const TypePtr* hook_mem);
  Node* set_predefined_input_for_runtime_call(SafePointNode* call);

  // helper functions for statistics
  void increment_counter(address counter_addr);   // increment a debug counter
  void increment_counter(Node*   counter_addr);   // increment a debug counter

  // Bail out to the interpreter right now
  // The optional klass is the one causing the trap.
  // The optional reason is debug information written to the compile log.
  // Optional must_throw is the same as with add_safepoint_edges.
  void uncommon_trap(int trap_request,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false);
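
  // For illustration, bailing out of a rare path might look like (sketch):
  //
  //   uncommon_trap(Deoptimization::make_trap_request(
  //                   Deoptimization::Reason_unreached,
  //                   Deoptimization::Action_reinterpret));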

  // Shorthand, to avoid saying "Deoptimization::" so many times.
  void uncommon_trap(Deoptimization::DeoptReason reason,
                     Deoptimization::DeoptAction action,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false) {
    uncommon_trap(Deoptimization::make_trap_request(reason, action),
                  klass, reason_string, must_throw, keep_exact_action);
  }

  // Report if there were too many traps at the current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(Deoptimization::DeoptReason reason) {
    return C->too_many_traps(method(), bci(), reason);
  }

  // Report if there were too many recompiles at the current method and bci.
  bool too_many_recompiles(Deoptimization::DeoptReason reason) {
    return C->too_many_recompiles(method(), bci(), reason);
  }

  // vanilla/CMS post barrier
  void write_barrier_post(Node *store, Node* obj, Node* adr, Node* val, bool use_precise);

  // Returns the object (if any) which was created the moment before.
  Node* just_allocated_object(Node* current_control);

  static bool use_ReduceInitialCardMarks() {
    return (ReduceInitialCardMarks
            && Universe::heap()->can_elide_tlab_store_barriers());
  }

  // Helper functions to round double arguments before a call, and the result after
  void round_double_arguments(ciMethod* dest_method);
  void round_double_result(ciMethod* dest_method);

  // rounding for strict float precision conformance
  Node* precision_rounding(Node* n);

  // rounding for strict double precision conformance
  Node* dprecision_rounding(Node* n);

  // rounding for non-strict double stores
  Node* dstore_rounding(Node* n);

  // Helper functions for fast/slow path codes
  Node* opt_iff(Node* region, Node* iff);
  Node* make_runtime_call(int flags,
                          const TypeFunc* call_type, address call_addr,
                          const char* call_name,
                          const TypePtr* adr_type, // NULL if no memory effects
                          Node* parm0 = NULL, Node* parm1 = NULL,
                          Node* parm2 = NULL, Node* parm3 = NULL,
                          Node* parm4 = NULL, Node* parm5 = NULL,
                          Node* parm6 = NULL, Node* parm7 = NULL);
  enum {  // flag values for make_runtime_call
    RC_NO_FP = 1,               // CallLeafNoFPNode
    RC_NO_IO = 2,               // do not hook IO edges
    RC_NO_LEAF = 4,             // CallStaticJavaNode
    RC_MUST_THROW = 8,          // flag passed to add_safepoint_edges
    RC_NARROW_MEM = 16,         // input memory is same as output
    RC_UNCOMMON = 32,           // freq. expected to be like uncommon trap
    RC_LEAF = 0                 // null value:  no flags set
  };

  // merge in all memory slices from new_mem, along the given path
  void merge_memory(Node* new_mem, Node* region, int new_path);
  void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj);

  // Helper functions to build synchronizations
  int next_monitor();
  Node* insert_mem_bar(int opcode, Node* precedent = NULL);
  Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = NULL);
  // Optional 'precedent' is appended as an extra edge, to force ordering.
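  // E.g. (sketch), the fence before a volatile store is typically
  //   insert_mem_bar(Op_MemBarRelease);
  // and the fence after a volatile load is
  //   insert_mem_bar(Op_MemBarAcquire);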
  FastLockNode* shared_lock(Node* obj);
  void shared_unlock(Node* box, Node* obj);

  // helper function for the fast path/slow path idioms
  Node* fast_and_slow(Node* in, const Type *result_type,
                      Node* null_result,
                      IfNode* fast_test, Node* fast_result,
                      address slow_call, const TypeFunc *slow_call_type,
                      Node* slow_arg, klassOop ex_klass,
                      Node* slow_result);

  // Generate an instance-of idiom.  Used by both the instance-of bytecode
  // and the reflective instance-of call.
  Node* gen_instanceof( Node *subobj, Node* superkls );

  // Generate a check-cast idiom.  Used by both the check-cast bytecode
  // and the array-store bytecode
  Node* gen_checkcast( Node *subobj, Node* superkls,
                       Node* *failure_control = NULL );

  // Generate a subtyping check.  Takes as input the subtype and supertype.
  // Returns 2 values: sets the default control() to the true path and
  // returns the false path.  Only reads from constant memory taken from the
  // default memory; does not write anything.  It also doesn't take in an
  // Object; if you wish to check an Object you need to load the Object's
  // class prior to coming here.
  Node* gen_subtype_check(Node* subklass, Node* superklass);

  // Static parse-time type checking logic for gen_subtype_check:
  enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
  int static_subtype_check(ciKlass* superk, ciKlass* subk);

  // Exact type check used for predicted calls and casts.
  // Rewrites (*casted_receiver) to be casted to the stronger type.
  // (Caller is responsible for doing replace_in_map.)
  Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                            Node* *casted_receiver);

  // implementation of object creation
  Node* set_output_for_allocation(AllocateNode* alloc,
                                  const TypeOopPtr* oop_type,
                                  bool raw_mem_only);
  Node* get_layout_helper(Node* klass_node, jint& constant_value);
  Node* new_instance(Node* klass_node,
                     Node* slow_test = NULL,
                     bool raw_mem_only = false,
                     Node* *return_size_val = NULL);
  Node* new_array(Node* klass_node, Node* count_val,
                  bool raw_mem_only = false, Node* *return_size_val = NULL);

  // Handy for making control flow
  IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new (C, 2) IfNode(ctrl, tst, prob, cnt); // New IfNode's
    _gvn.set_type(iff, iff->Value(&_gvn));  // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);  // Range-check and Null-check removal is later
    return iff;
  }

  IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new (C, 2) IfNode(ctrl, tst, prob, cnt); // New IfNode's
    _gvn.transform(iff);                    // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);  // Range-check and Null-check removal is later
    return iff;
  }
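
  // Illustrative branch construction (a sketch; 'cmp' stands for an
  // already-built comparison node):
  //
  //   Node*   tst      = _gvn.transform(new (C, 2) BoolNode(cmp, BoolTest::eq));
  //   IfNode* iff      = create_and_xform_if(control(), tst, PROB_FAIR, COUNT_UNKNOWN);
  //   Node*   if_true  = _gvn.transform(new (C, 1) IfTrueNode(iff));
  //   Node*   if_false = _gvn.transform(new (C, 1) IfFalseNode(iff));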
};

// Helper class to support building of control flow branches.  Upon
// creation the map and sp at bci are cloned and restored upon
// destruction.  Typical use:
//
// { PreserveJVMState pjvms(this);
//   // code of new branch
// }
// // here the JVM state at bci is established

class PreserveJVMState: public StackObj {
 protected:
  GraphKit*      _kit;
#ifdef ASSERT
  int            _block;  // PO of current block, if a Parse
  int            _bci;
#endif
  SafePointNode* _map;
  uint           _sp;

 public:
  PreserveJVMState(GraphKit* kit, bool clone_map = true);
  ~PreserveJVMState();
};

// Helper class to build cutouts of the form if (p) ; else {x...}.
// The code {x...} must not fall through.
// The kit's main flow of control is set to the "then" continuation of if(p).
class BuildCutout: public PreserveJVMState {
 public:
  BuildCutout(GraphKit* kit, Node* p, float prob, float cnt = COUNT_UNKNOWN);
  ~BuildCutout();
};
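
// Illustrative use of BuildCutout (a sketch; 'bol' stands for an
// already-built boolean test node).  The cutout body must not fall
// through, so it typically ends in a trap:
//
// { BuildCutout unless(this, bol, PROB_MAX);
//   uncommon_trap(Deoptimization::Reason_intrinsic,
//                 Deoptimization::Action_make_not_entrant);
// }
// // control continues here along the "then" (test-true) path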