rev 1025 : imported patch indy.compiler.patch
--- old/src/share/vm/opto/callnode.hpp
+++ new/src/share/vm/opto/callnode.hpp
1 1 /*
2 2 * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 21 * have any questions.
22 22 *
23 23 */
24 24
25 25 // Portions of code courtesy of Clifford Click
26 26
27 27 // Optimization - Graph Style
28 28
29 29 class Chaitin;
30 30 class NamedCounter;
31 31 class MultiNode;
32 32 class SafePointNode;
33 33 class CallNode;
34 34 class CallJavaNode;
35 35 class CallStaticJavaNode;
36 36 class CallDynamicJavaNode;
37 37 class CallRuntimeNode;
38 38 class CallLeafNode;
39 39 class CallLeafNoFPNode;
40 40 class AllocateNode;
41 41 class AllocateArrayNode;
42 42 class LockNode;
43 43 class UnlockNode;
44 44 class JVMState;
45 45 class OopMap;
46 46 class State;
47 47 class StartNode;
48 48 class MachCallNode;
49 49 class FastLockNode;
50 50
51 51 //------------------------------StartNode--------------------------------------
52 52 // The method start node
53 53 class StartNode : public MultiNode {
54 54 virtual uint cmp( const Node &n ) const;
55 55 virtual uint size_of() const; // Size is bigger
56 56 public:
57 57 const TypeTuple *_domain;
58 58 StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
59 59 init_class_id(Class_Start);
60 60 init_flags(Flag_is_block_start);
61 61 init_req(0,this);
62 62 init_req(1,root);
63 63 }
64 64 virtual int Opcode() const;
65 65 virtual bool pinned() const { return true; };
66 66 virtual const Type *bottom_type() const;
67 67 virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
68 68 virtual const Type *Value( PhaseTransform *phase ) const;
69 69 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
70 70 virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
71 71 virtual const RegMask &in_RegMask(uint) const;
72 72 virtual Node *match( const ProjNode *proj, const Matcher *m );
73 73 virtual uint ideal_reg() const { return 0; }
74 74 #ifndef PRODUCT
75 75 virtual void dump_spec(outputStream *st) const;
76 76 #endif
77 77 };
78 78
79 79 //------------------------------StartOSRNode-----------------------------------
80 80 // The method start node for on stack replacement code
81 81 class StartOSRNode : public StartNode {
82 82 public:
83 83 StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
84 84 virtual int Opcode() const;
85 85 static const TypeTuple *osr_domain();
86 86 };
87 87
88 88
89 89 //------------------------------ParmNode---------------------------------------
90 90 // Incoming parameters
91 91 class ParmNode : public ProjNode {
92 92 static const char * const names[TypeFunc::Parms+1];
93 93 public:
94 94 ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
95 95 init_class_id(Class_Parm);
96 96 }
97 97 virtual int Opcode() const;
98 98 virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
99 99 virtual uint ideal_reg() const;
100 100 #ifndef PRODUCT
101 101 virtual void dump_spec(outputStream *st) const;
102 102 #endif
103 103 };
104 104
105 105
106 106 //------------------------------ReturnNode-------------------------------------
107 107 // Return from subroutine node
108 108 class ReturnNode : public Node {
109 109 public:
110 110 ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
111 111 virtual int Opcode() const;
112 112 virtual bool is_CFG() const { return true; }
113 113 virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
114 114 virtual bool depends_only_on_test() const { return false; }
115 115 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
116 116 virtual const Type *Value( PhaseTransform *phase ) const;
117 117 virtual uint ideal_reg() const { return NotAMachineReg; }
118 118 virtual uint match_edge(uint idx) const;
119 119 #ifndef PRODUCT
120 120 virtual void dump_req() const;
121 121 #endif
122 122 };
123 123
124 124
125 125 //------------------------------RethrowNode------------------------------------
126 126 // Rethrow of exception at call site. Ends a procedure before rethrowing;
127 127 // ends the current basic block like a ReturnNode. Restores registers and
128 128 // unwinds stack. Rethrow happens in the caller's method.
129 129 class RethrowNode : public Node {
130 130 public:
131 131 RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
132 132 virtual int Opcode() const;
133 133 virtual bool is_CFG() const { return true; }
134 134 virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
135 135 virtual bool depends_only_on_test() const { return false; }
136 136 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
137 137 virtual const Type *Value( PhaseTransform *phase ) const;
138 138 virtual uint match_edge(uint idx) const;
139 139 virtual uint ideal_reg() const { return NotAMachineReg; }
140 140 #ifndef PRODUCT
141 141 virtual void dump_req() const;
142 142 #endif
143 143 };
144 144
145 145
146 146 //------------------------------TailCallNode-----------------------------------
147 147 // Pop stack frame and jump indirect
148 148 class TailCallNode : public ReturnNode {
149 149 public:
150 150 TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
151 151 : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
152 152 init_req(TypeFunc::Parms, target);
153 153 init_req(TypeFunc::Parms+1, moop);
154 154 }
155 155
156 156 virtual int Opcode() const;
157 157 virtual uint match_edge(uint idx) const;
158 158 };
159 159
160 160 //------------------------------TailJumpNode-----------------------------------
161 161 // Pop stack frame and jump indirect
162 162 class TailJumpNode : public ReturnNode {
163 163 public:
164 164 TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
165 165 : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
166 166 init_req(TypeFunc::Parms, target);
167 167 init_req(TypeFunc::Parms+1, ex_oop);
168 168 }
169 169
170 170 virtual int Opcode() const;
171 171 virtual uint match_edge(uint idx) const;
172 172 };
173 173
174 174 //-------------------------------JVMState-------------------------------------
175 175 // A linked list of JVMState nodes captures the whole interpreter state,
176 176 // plus GC roots, for all active calls at some call site in this compilation
177 177 // unit. (If there is no inlining, then the list has exactly one link.)
178 178 // This provides a way to map the optimized program back into the interpreter,
179 179 // or to let the GC mark the stack.
180 180 class JVMState : public ResourceObj {
181 181 public:
182 182 typedef enum {
183 183 Reexecute_Undefined = -1, // not defined -- will be translated into false later
184 184 Reexecute_False = 0, // false -- do not reexecute
185 185 Reexecute_True = 1 // true -- reexecute the bytecode
186 186 } ReexecuteState; //Reexecute State
187 187
188 188 private:
189 189 JVMState* _caller; // List pointer for forming scope chains
190 190 uint _depth; // One more than caller depth, or one.
191 191 uint _locoff; // Offset to locals in input edge mapping
192 192 uint _stkoff; // Offset to stack in input edge mapping
193 193 uint _monoff; // Offset to monitors in input edge mapping
194 194 uint _scloff; // Offset to fields of scalar objs in input edge mapping
195 195 uint _endoff; // Offset to end of input edge mapping
196 196 uint _sp; // Java Expression Stack Pointer for this state
197 197 int _bci; // Byte Code Index of this JVM point
198 198 ReexecuteState _reexecute; // Whether this bytecode needs to be re-executed
199 199 ciMethod* _method; // Method Pointer
200 200 SafePointNode* _map; // Map node associated with this scope
201 201 public:
202 202 friend class Compile;
203 203 friend class PreserveReexecuteState;
204 204
205 205 // Because JVMState objects live over the entire lifetime of the
206 206 // Compile object, they are allocated into the comp_arena, which
207 207 // does not get resource marked or reset during the compile process
208 208 void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
209 209 void operator delete( void * ) { } // fast deallocation
210 210
211 211 // Create a new JVMState, ready for abstract interpretation.
212 212 JVMState(ciMethod* method, JVMState* caller);
213 213 JVMState(int stack_size); // root state; has a null method
214 214
215 215 // Access functions for the JVM
216 216 uint locoff() const { return _locoff; }
217 217 uint stkoff() const { return _stkoff; }
218 218 uint argoff() const { return _stkoff + _sp; }
219 219 uint monoff() const { return _monoff; }
220 220 uint scloff() const { return _scloff; }
221 221 uint endoff() const { return _endoff; }
222 222 uint oopoff() const { return debug_end(); }
223 223
224 224 int loc_size() const { return _stkoff - _locoff; }
225 225 int stk_size() const { return _monoff - _stkoff; }
226 226 int mon_size() const { return _scloff - _monoff; }
227 227 int scl_size() const { return _endoff - _scloff; }
228 228
229 229 bool is_loc(uint i) const { return i >= _locoff && i < _stkoff; }
230 230 bool is_stk(uint i) const { return i >= _stkoff && i < _monoff; }
231 231 bool is_mon(uint i) const { return i >= _monoff && i < _scloff; }
232 232 bool is_scl(uint i) const { return i >= _scloff && i < _endoff; }
233 233
234 234 uint sp() const { return _sp; }
235 235 int bci() const { return _bci; }
236 236 bool should_reexecute() const { return _reexecute==Reexecute_True; }
237 237 bool is_reexecute_undefined() const { return _reexecute==Reexecute_Undefined; }
238 238 bool has_method() const { return _method != NULL; }
239 239 ciMethod* method() const { assert(has_method(), ""); return _method; }
240 240 JVMState* caller() const { return _caller; }
241 241 SafePointNode* map() const { return _map; }
242 242 uint depth() const { return _depth; }
243 243 uint debug_start() const; // returns locoff of root caller
244 244 uint debug_end() const; // returns endoff of self
245 245 uint debug_size() const {
246 246 return loc_size() + sp() + mon_size() + scl_size();
247 247 }
248 248 uint debug_depth() const; // returns sum of debug_size values at all depths
249 249
250 250 // Returns the JVM state at the desired depth (1 == root).
251 251 JVMState* of_depth(int d) const;
252 252
253 253 // Tells if two JVM states have the same call chain (depth, methods, & bcis).
254 254 bool same_calls_as(const JVMState* that) const;
255 255
256 256 // Monitors (monitors are stored as (boxNode, objNode) pairs)
257 257 enum { logMonitorEdges = 1 };
258 258 int nof_monitors() const { return mon_size() >> logMonitorEdges; }
259 259 int monitor_depth() const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
260 260 int monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
261 261 int monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
262 262 bool is_monitor_box(uint off) const {
263 263 assert(is_mon(off), "should be called only for monitor edge");
264 264 return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
265 265 }
266 266 bool is_monitor_use(uint off) const { return (is_mon(off)
267 267 && is_monitor_box(off))
268 268 || (caller() && caller()->is_monitor_use(off)); }
269 269
270 270 // Initialization functions for the JVM
271 271 void set_locoff(uint off) { _locoff = off; }
272 272 void set_stkoff(uint off) { _stkoff = off; }
273 273 void set_monoff(uint off) { _monoff = off; }
274 274 void set_scloff(uint off) { _scloff = off; }
275 275 void set_endoff(uint off) { _endoff = off; }
276 276 void set_offsets(uint off) {
277 277 _locoff = _stkoff = _monoff = _scloff = _endoff = off;
278 278 }
279 279 void set_map(SafePointNode *map) { _map = map; }
280 280 void set_sp(uint sp) { _sp = sp; }
281 281 // _reexecute is initialized to "undefined" for a new bci
282 282 void set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
283 283 void set_should_reexecute(bool reexec) {_reexecute = reexec ? Reexecute_True : Reexecute_False;}
284 284
285 285 // Miscellaneous utility functions
286 286 JVMState* clone_deep(Compile* C) const; // recursively clones caller chain
287 287 JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
288 288
289 289 #ifndef PRODUCT
290 290 void format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
291 291 void dump_spec(outputStream *st) const;
292 292 void dump_on(outputStream* st) const;
293 293 void dump() const {
294 294 dump_on(tty);
295 295 }
296 296 #endif
297 297 };
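For orientation, a minimal sketch of how a consumer might walk a JVMState chain and locate monitor edges. Here `jvms` is a hypothetical pointer to the innermost state and the printing is purely illustrative:

    // Walk from the innermost activation out to the root caller.
    for (JVMState* s = jvms; s != NULL; s = s->caller()) {
      if (s->has_method()) {
        tty->print_cr("depth %u: bci %d", s->depth(), s->bci());
      }
    }
    // With logMonitorEdges == 1, monitor idx occupies two input edges:
    //   box: monoff() + (idx << 1)     obj: monoff() + (idx << 1) + 1
    int first_box = (jvms->nof_monitors() > 0) ? jvms->monitor_box_offset(0) : -1;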
298 298
299 299 //------------------------------SafePointNode----------------------------------
300 300 // A SafePointNode is a subclass of a MultiNode for convenience (and
301 301 // potential code sharing) only - conceptually it is independent of
302 302 // the Node semantics.
303 303 class SafePointNode : public MultiNode {
304 304 virtual uint cmp( const Node &n ) const;
305 305 virtual uint size_of() const; // Size is bigger
306 306
307 307 public:
308 308 SafePointNode(uint edges, JVMState* jvms,
309 309 // A plain safepoint advertises no memory effects (NULL):
310 310 const TypePtr* adr_type = NULL)
311 311 : MultiNode( edges ),
312 312 _jvms(jvms),
313 313 _oop_map(NULL),
314 314 _adr_type(adr_type)
315 315 {
316 316 init_class_id(Class_SafePoint);
317 317 }
318 318
319 319 OopMap* _oop_map; // Array of OopMap info (8-bit char) for GC
320 320 JVMState* const _jvms; // Pointer to list of JVM State objects
321 321 const TypePtr* _adr_type; // What type of memory does this node produce?
322 322
323 323 // Many calls take *all* of memory as input,
324 324 // but some produce a limited subset of that memory as output.
325 325 // The adr_type reports the call's behavior as a store, not a load.
326 326
327 327 virtual JVMState* jvms() const { return _jvms; }
328 328 void set_jvms(JVMState* s) {
329 329 *(JVMState**)&_jvms = s; // override const attribute in the accessor
330 330 }
331 331 OopMap *oop_map() const { return _oop_map; }
332 332 void set_oop_map(OopMap *om) { _oop_map = om; }
333 333
334 334 // Functionality from old debug nodes which has changed
335 335 Node *local(JVMState* jvms, uint idx) const {
336 336 assert(verify_jvms(jvms), "jvms must match");
337 337 return in(jvms->locoff() + idx);
338 338 }
339 339 Node *stack(JVMState* jvms, uint idx) const {
340 340 assert(verify_jvms(jvms), "jvms must match");
341 341 return in(jvms->stkoff() + idx);
342 342 }
343 343 Node *argument(JVMState* jvms, uint idx) const {
344 344 assert(verify_jvms(jvms), "jvms must match");
345 345 return in(jvms->argoff() + idx);
346 346 }
347 347 Node *monitor_box(JVMState* jvms, uint idx) const {
348 348 assert(verify_jvms(jvms), "jvms must match");
349 349 return in(jvms->monitor_box_offset(idx));
350 350 }
351 351 Node *monitor_obj(JVMState* jvms, uint idx) const {
352 352 assert(verify_jvms(jvms), "jvms must match");
353 353 return in(jvms->monitor_obj_offset(idx));
354 354 }
355 355
356 356 void set_local(JVMState* jvms, uint idx, Node *c);
357 357
358 358 void set_stack(JVMState* jvms, uint idx, Node *c) {
359 359 assert(verify_jvms(jvms), "jvms must match");
360 360 set_req(jvms->stkoff() + idx, c);
361 361 }
362 362 void set_argument(JVMState* jvms, uint idx, Node *c) {
363 363 assert(verify_jvms(jvms), "jvms must match");
364 364 set_req(jvms->argoff() + idx, c);
365 365 }
366 366 void ensure_stack(JVMState* jvms, uint stk_size) {
367 367 assert(verify_jvms(jvms), "jvms must match");
368 368 int grow_by = (int)stk_size - (int)jvms->stk_size();
369 369 if (grow_by > 0) grow_stack(jvms, grow_by);
370 370 }
371 371 void grow_stack(JVMState* jvms, uint grow_by);
372 372 // Handle monitor stack
373 373 void push_monitor( const FastLockNode *lock );
374 374 void pop_monitor ();
375 375 Node *peek_monitor_box() const;
376 376 Node *peek_monitor_obj() const;
377 377
378 378 // Access functions for the JVM
379 379 Node *control () const { return in(TypeFunc::Control ); }
380 380 Node *i_o () const { return in(TypeFunc::I_O ); }
381 381 Node *memory () const { return in(TypeFunc::Memory ); }
382 382 Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
383 383 Node *frameptr () const { return in(TypeFunc::FramePtr ); }
384 384
385 385 void set_control ( Node *c ) { set_req(TypeFunc::Control,c); }
386 386 void set_i_o ( Node *c ) { set_req(TypeFunc::I_O ,c); }
387 387 void set_memory ( Node *c ) { set_req(TypeFunc::Memory ,c); }
388 388
389 389 MergeMemNode* merged_memory() const {
390 390 return in(TypeFunc::Memory)->as_MergeMem();
391 391 }
392 392
393 393 // The parser marks useless maps as dead when it's done with them:
394 394 bool is_killed() { return in(TypeFunc::Control) == NULL; }
395 395
396 396 // Exception states bubbling out of subgraphs such as inlined calls
397 397 // are recorded here. (There might be more than one, hence the "next".)
398 398 // This feature is used only for safepoints which serve as "maps"
399 399 // for JVM states during parsing, intrinsic expansion, etc.
400 400 SafePointNode* next_exception() const;
401 401 void set_next_exception(SafePointNode* n);
402 402 bool has_exceptions() const { return next_exception() != NULL; }
403 403
404 404 // Standard Node stuff
405 405 virtual int Opcode() const;
406 406 virtual bool pinned() const { return true; }
407 407 virtual const Type *Value( PhaseTransform *phase ) const;
408 408 virtual const Type *bottom_type() const { return Type::CONTROL; }
409 409 virtual const TypePtr *adr_type() const { return _adr_type; }
410 410 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
411 411 virtual Node *Identity( PhaseTransform *phase );
412 412 virtual uint ideal_reg() const { return 0; }
413 413 virtual const RegMask &in_RegMask(uint) const;
414 414 virtual const RegMask &out_RegMask() const;
415 415 virtual uint match_edge(uint idx) const;
416 416
417 417 static bool needs_polling_address_input();
418 418
419 419 #ifndef PRODUCT
420 420 virtual void dump_spec(outputStream *st) const;
421 421 #endif
422 422 };
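A short sketch of reading debug state off a map through its JVMState offsets; `map` is a hypothetical SafePointNode whose state has at least one local slot:

    JVMState* jvms = map->jvms();
    Node* loc0 = map->local(jvms, 0);                                     // local slot 0
    Node* tos  = (jvms->sp() > 0) ? map->stack(jvms, jvms->sp() - 1) : NULL;
    Node* box0 = (jvms->nof_monitors() > 0) ? map->monitor_box(jvms, 0) : NULL;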
423 423
424 424 //------------------------------SafePointScalarObjectNode----------------------
425 425 // A SafePointScalarObjectNode represents the state of a scalarized object
426 426 // at a safepoint.
427 427
428 428 class SafePointScalarObjectNode: public TypeNode {
429 429 uint _first_index; // First input edge index of a SafePoint node where
430 430 // states of the scalarized object fields are collected.
431 431 uint _n_fields; // Number of non-static fields of the scalarized object.
432 432 DEBUG_ONLY(AllocateNode* _alloc;)
433 433 public:
434 434 SafePointScalarObjectNode(const TypeOopPtr* tp,
435 435 #ifdef ASSERT
436 436 AllocateNode* alloc,
437 437 #endif
438 438 uint first_index, uint n_fields);
439 439 virtual int Opcode() const;
440 440 virtual uint ideal_reg() const;
441 441 virtual const RegMask &in_RegMask(uint) const;
442 442 virtual const RegMask &out_RegMask() const;
443 443 virtual uint match_edge(uint idx) const;
444 444
445 445 uint first_index() const { return _first_index; }
446 446 uint n_fields() const { return _n_fields; }
447 447 DEBUG_ONLY(AllocateNode* alloc() const { return _alloc; })
448 448
449 449 // SafePointScalarObject should be always pinned to the control edge
450 450 // of the SafePoint node for which it was generated.
451 451 virtual bool pinned() const; // { return true; }
452 452
453 453 // SafePointScalarObject depends on the SafePoint node
454 454 // for which it was generated.
455 455 virtual bool depends_only_on_test() const; // { return false; }
456 456
457 457 virtual uint size_of() const { return sizeof(*this); }
458 458
459 459 // Assumes that "this" is an argument to a safepoint node "s", and that
460 460 // "new_call" is being created to correspond to "s". But the difference
461 461 // between the start index of the jvmstates of "new_call" and "s" is
462 462 // "jvms_adj". Produce and return a SafePointScalarObjectNode that
463 463 // corresponds appropriately to "this" in "new_call". Assumes that
464 464 // "sosn_map" is a map, specific to the translation of "s" to "new_call",
465 465 // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
466 466 SafePointScalarObjectNode* clone(int jvms_adj, Dict* sosn_map) const;
467 467
468 468 #ifndef PRODUCT
469 469 virtual void dump_spec(outputStream *st) const;
470 470 #endif
471 471 };
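A hedged sketch of the sosn_map protocol when translating a safepoint "s" into "new_call", assuming libadt's Dict with the usual cmpkey/hashkey functions; `sosn` and `jvms_adj` are placeholders:

    Dict* sosn_map = new Dict(cmpkey, hashkey);   // one map per translated call
    // For each SafePointScalarObjectNode argument 'sosn' of 's':
    SafePointScalarObjectNode* copy = sosn->clone(jvms_adj, sosn_map);
    // A later occurrence of the same 'sosn' yields the same 'copy',
    // so the translation never duplicates a scalarized object.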
472 472
473 473 //------------------------------CallNode---------------------------------------
474 474 // Call nodes now subsume the function of debug nodes at callsites, so they
475 475 // contain the functionality of a full scope chain of debug nodes.
476 476 class CallNode : public SafePointNode {
477 477 public:
478 478 const TypeFunc *_tf; // Function type
479 479 address _entry_point; // Address of method being called
480 480 float _cnt; // Estimate of number of times called
481 481
482 482 CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
483 483 : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
484 484 _tf(tf),
485 485 _entry_point(addr),
486 486 _cnt(COUNT_UNKNOWN)
487 487 {
488 488 init_class_id(Class_Call);
489 489 init_flags(Flag_is_Call);
490 490 }
491 491
492 492 const TypeFunc* tf() const { return _tf; }
493 493 const address entry_point() const { return _entry_point; }
494 494 const float cnt() const { return _cnt; }
495 495
496 496 void set_tf(const TypeFunc* tf) { _tf = tf; }
497 497 void set_entry_point(address p) { _entry_point = p; }
498 498 void set_cnt(float c) { _cnt = c; }
499 499
500 500 virtual const Type *bottom_type() const;
501 501 virtual const Type *Value( PhaseTransform *phase ) const;
502 502 virtual Node *Identity( PhaseTransform *phase ) { return this; }
503 503 virtual uint cmp( const Node &n ) const;
504 504 virtual uint size_of() const = 0;
505 505 virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
506 506 virtual Node *match( const ProjNode *proj, const Matcher *m );
507 507 virtual uint ideal_reg() const { return NotAMachineReg; }
508 508 // Are we guaranteed that this node is a safepoint? Not true for leaf calls and
509 509 // for some macro nodes whose expansion does not have a safepoint on the fast path.
510 510 virtual bool guaranteed_safepoint() { return true; }
511 511 // For macro nodes, the JVMState gets modified during expansion, so when cloning
512 512 // the node the JVMState must be cloned.
513 513 virtual void clone_jvms() { } // default is not to clone
514 514
515 515 // Returns true if the call may modify n
516 516 virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase);
517 517 // Does this node have a use of n other than in debug information?
518 518 bool has_non_debug_use(Node *n);
520 520 // Returns the unique CheckCastPP of a call
521 521 // or result projection; returns NULL if there are
522 522 // several CheckCastPPs or none at all.
522 522 Node *result_cast();
523 523
524 524 virtual uint match_edge(uint idx) const;
525 525
526 526 #ifndef PRODUCT
527 527 virtual void dump_req() const;
528 528 virtual void dump_spec(outputStream *st) const;
529 529 #endif
530 530 };
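As a sketch, the result and aliasing queries above might be used like this; `call`, `n`, `addr_t`, and `phase` are all placeholders:

    Node* cast    = call->result_cast();            // unique CheckCastPP user, or NULL
    bool uses_n   = call->has_non_debug_use(n);     // does n appear outside debug info?
    bool clobbers = call->may_modify(addr_t, phase);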
531 531
(531 lines elided)
532 532 //------------------------------CallJavaNode-----------------------------------
533 533 // Make a static or dynamic subroutine call node using Java calling
534 534 // convention. (The "Java" calling convention is the compiler's calling
535 535 // convention, as opposed to the interpreter's or that of native C.)
536 536 class CallJavaNode : public CallNode {
537 537 protected:
538 538 virtual uint cmp( const Node &n ) const;
539 539 virtual uint size_of() const; // Size is bigger
540 540
541 541 bool _optimized_virtual;
542 + bool _method_handle_invoke;
542 543 ciMethod* _method; // Method being direct called
543 544 public:
544 545 const int _bci; // Byte Code Index of call byte code
545 546 CallJavaNode(const TypeFunc* tf , address addr, ciMethod* method, int bci)
546 547 : CallNode(tf, addr, TypePtr::BOTTOM),
547 - _method(method), _bci(bci), _optimized_virtual(false)
548 + _method(method), _bci(bci),
549 + _optimized_virtual(false),
550 + _method_handle_invoke(false)
548 551 {
549 552 init_class_id(Class_CallJava);
550 553 }
551 554
552 555 virtual int Opcode() const;
553 556 ciMethod* method() const { return _method; }
554 557 void set_method(ciMethod *m) { _method = m; }
555 558 void set_optimized_virtual(bool f) { _optimized_virtual = f; }
556 559 bool is_optimized_virtual() const { return _optimized_virtual; }
560 + void set_method_handle_invoke(bool f) { _method_handle_invoke = f; }
561 + bool is_method_handle_invoke() const { return _method_handle_invoke; }
557 562
558 563 #ifndef PRODUCT
559 564 virtual void dump_spec(outputStream *st) const;
560 565 #endif
561 566 };
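The point of this hunk: a JSR 292 method-handle invoke gets its own bit, parallel to _optimized_virtual, so later phases can tell such call sites apart. A hypothetical tagging step at call generation:

    // cjn: a freshly built CallJavaNode for a MethodHandle invoke site
    cjn->set_method_handle_invoke(true);
    if (cjn->is_method_handle_invoke()) {
      // later phases would select the method-handle calling convention here
    }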
562 567
563 568 //------------------------------CallStaticJavaNode-----------------------------
564 569 // Make a direct subroutine call using Java calling convention (for static
565 570 // calls and optimized virtual calls, plus calls to wrappers for run-time
566 571 // routines); generates static stub.
567 572 class CallStaticJavaNode : public CallJavaNode {
568 573 virtual uint cmp( const Node &n ) const;
569 574 virtual uint size_of() const; // Size is bigger
570 575 public:
571 576 CallStaticJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
572 577 : CallJavaNode(tf, addr, method, bci), _name(NULL) {
573 578 init_class_id(Class_CallStaticJava);
574 579 }
575 580 CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
576 581 const TypePtr* adr_type)
577 582 : CallJavaNode(tf, addr, NULL, bci), _name(name) {
578 583 init_class_id(Class_CallStaticJava);
579 584 // This node calls a runtime stub, which often has narrow memory effects.
580 585 _adr_type = adr_type;
581 586 }
582 587 const char *_name; // Runtime wrapper name
583 588
584 589 // If this is an uncommon trap, return the request code, else zero.
585 590 int uncommon_trap_request() const;
586 591 static int extract_uncommon_trap_request(const Node* call);
587 592
588 593 virtual int Opcode() const;
589 594 #ifndef PRODUCT
590 595 virtual void dump_spec(outputStream *st) const;
591 596 #endif
592 597 };
593 598
594 599 //------------------------------CallDynamicJavaNode----------------------------
595 600 // Make a dispatched call using Java calling convention.
596 601 class CallDynamicJavaNode : public CallJavaNode {
597 602 virtual uint cmp( const Node &n ) const;
598 603 virtual uint size_of() const; // Size is bigger
599 604 public:
600 605 CallDynamicJavaNode( const TypeFunc *tf , address addr, ciMethod* method, int vtable_index, int bci ) : CallJavaNode(tf,addr,method,bci), _vtable_index(vtable_index) {
601 606 init_class_id(Class_CallDynamicJava);
602 607 }
603 608
604 609 int _vtable_index;
605 610 virtual int Opcode() const;
606 611 #ifndef PRODUCT
607 612 virtual void dump_spec(outputStream *st) const;
608 613 #endif
609 614 };
610 615
611 616 //------------------------------CallRuntimeNode--------------------------------
612 617 // Make a direct subroutine call node into compiled C++ code.
613 618 class CallRuntimeNode : public CallNode {
614 619 virtual uint cmp( const Node &n ) const;
615 620 virtual uint size_of() const; // Size is bigger
616 621 public:
617 622 CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
618 623 const TypePtr* adr_type)
619 624 : CallNode(tf, addr, adr_type),
620 625 _name(name)
621 626 {
622 627 init_class_id(Class_CallRuntime);
623 628 }
624 629
625 630 const char *_name; // Printable name, if _method is NULL
626 631 virtual int Opcode() const;
627 632 virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
628 633
629 634 #ifndef PRODUCT
630 635 virtual void dump_spec(outputStream *st) const;
631 636 #endif
632 637 };
633 638
634 639 //------------------------------CallLeafNode-----------------------------------
635 640 // Make a direct subroutine call node into compiled C++ code, without
636 641 // safepoints
637 642 class CallLeafNode : public CallRuntimeNode {
638 643 public:
639 644 CallLeafNode(const TypeFunc* tf, address addr, const char* name,
640 645 const TypePtr* adr_type)
641 646 : CallRuntimeNode(tf, addr, name, adr_type)
642 647 {
643 648 init_class_id(Class_CallLeaf);
644 649 }
645 650 virtual int Opcode() const;
646 651 virtual bool guaranteed_safepoint() { return false; }
647 652 #ifndef PRODUCT
648 653 virtual void dump_spec(outputStream *st) const;
649 654 #endif
650 655 };
651 656
652 657 //------------------------------CallLeafNoFPNode-------------------------------
653 658 // A CallLeafNode that does not use floating point, or uses it in the same
654 659 // manner as the generated code
655 660 class CallLeafNoFPNode : public CallLeafNode {
656 661 public:
657 662 CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
658 663 const TypePtr* adr_type)
659 664 : CallLeafNode(tf, addr, name, adr_type)
660 665 {
661 666 }
662 667 virtual int Opcode() const;
663 668 };
664 669
665 670
666 671 //------------------------------Allocate---------------------------------------
667 672 // High-level memory allocation
668 673 //
669 674 // AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
670 675 // get expanded into a code sequence containing a call. Unlike other CallNodes,
671 676 // they have 2 memory projections and 2 i_o projections (which are distinguished by
672 677 // the _is_io_use flag in the projection.) This is needed when expanding the node in
673 678 // order to differentiate the uses of the projection on the normal control path from
674 679 // those on the exception return path.
675 680 //
676 681 class AllocateNode : public CallNode {
677 682 public:
678 683 enum {
679 684 // Output:
680 685 RawAddress = TypeFunc::Parms, // the newly-allocated raw address
681 686 // Inputs:
682 687 AllocSize = TypeFunc::Parms, // size (in bytes) of the new object
683 688 KlassNode, // type (maybe dynamic) of the obj.
684 689 InitialTest, // slow-path test (may be constant)
685 690 ALength, // array length (or TOP if none)
686 691 ParmLimit
687 692 };
688 693
689 694 static const TypeFunc* alloc_type() {
690 695 const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
691 696 fields[AllocSize] = TypeInt::POS;
692 697 fields[KlassNode] = TypeInstPtr::NOTNULL;
693 698 fields[InitialTest] = TypeInt::BOOL;
694 699 fields[ALength] = TypeInt::INT; // length (can be a bad length)
695 700
696 701 const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
697 702
698 703 // create result type (range)
699 704 fields = TypeTuple::fields(1);
700 705 fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
701 706
702 707 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
703 708
704 709 return TypeFunc::make(domain, range);
705 710 }
706 711
707 712 bool _is_scalar_replaceable; // Result of Escape Analysis
708 713
709 714 virtual uint size_of() const; // Size is bigger
710 715 AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
711 716 Node *size, Node *klass_node, Node *initial_test);
712 717 // Expansion modifies the JVMState, so we need to clone it
713 718 virtual void clone_jvms() {
714 719 set_jvms(jvms()->clone_deep(Compile::current()));
715 720 }
716 721 virtual int Opcode() const;
717 722 virtual uint ideal_reg() const { return Op_RegP; }
718 723 virtual bool guaranteed_safepoint() { return false; }
719 724
720 725 // allocations do not modify their arguments
721 726 virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase) { return false;}
722 727
723 728 // Pattern-match a possible usage of AllocateNode.
724 729 // Return null if no allocation is recognized.
725 730 // The operand is the pointer produced by the (possible) allocation.
726 731 // It must be a projection of the Allocate or its subsequent CastPP.
727 732 // (Note: This function is defined in file graphKit.cpp, near
728 733 // GraphKit::new_instance/new_array, whose output it recognizes.)
729 734 // The 'ptr' may not have an offset unless the 'offset' argument is given.
730 735 static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);
731 736
732 737 // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
733 738 // an offset, which is reported back to the caller.
734 739 // (Note: AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
735 740 static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
736 741 intptr_t& offset);
737 742
738 743 // Dig the klass operand out of a (possible) allocation site.
739 744 static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
740 745 AllocateNode* allo = Ideal_allocation(ptr, phase);
741 746 return (allo == NULL) ? NULL : allo->in(KlassNode);
742 747 }
743 748
744 749 // Conservatively small estimate of offset of first non-header byte.
745 750 int minimum_header_size() {
746 751 return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
747 752 instanceOopDesc::base_offset_in_bytes();
748 753 }
749 754
750 755 // Return the corresponding initialization barrier (or null if none).
751 756 // Walks out edges to find it...
752 757 // (Note: Both InitializeNode::allocation and AllocateNode::initialization
753 758 // are defined in graphKit.cpp, which sets up the bidirectional relation.)
754 759 InitializeNode* initialization();
755 760
756 761 // Convenience for initialization->maybe_set_complete(phase)
757 762 bool maybe_set_complete(PhaseGVN* phase);
758 763 };
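A sketch of the pattern-matching entry points declared above; `ptr` and `phase` are placeholders for a candidate pointer and the current transform:

    AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr, phase);
    if (alloc != NULL) {
      Node* klass = alloc->in(AllocateNode::KlassNode);
      Node* size  = alloc->in(AllocateNode::AllocSize);
      InitializeNode* init = alloc->initialization();   // may be NULL
    }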
759 764
760 765 //------------------------------AllocateArray---------------------------------
761 766 //
762 767 // High-level array allocation
763 768 //
764 769 class AllocateArrayNode : public AllocateNode {
765 770 public:
766 771 AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
767 772 Node* size, Node* klass_node, Node* initial_test,
768 773 Node* count_val
769 774 )
770 775 : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
771 776 initial_test)
772 777 {
773 778 init_class_id(Class_AllocateArray);
774 779 set_req(AllocateNode::ALength, count_val);
775 780 }
776 781 virtual int Opcode() const;
777 782 virtual uint size_of() const; // Size is bigger
778 783 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
779 784
780 785 // Dig the length operand out of an array allocation site.
781 786 Node* Ideal_length() {
782 787 return in(AllocateNode::ALength);
783 788 }
784 789
785 790 // Dig the length operand out of an array allocation site and narrow the
786 791 // type with a CastII, if necessary
787 792 Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);
788 793
789 794 // Pattern-match a possible usage of AllocateArrayNode.
790 795 // Return null if no allocation is recognized.
791 796 static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
792 797 AllocateNode* allo = Ideal_allocation(ptr, phase);
793 798 return (allo == NULL || !allo->is_AllocateArray())
794 799 ? NULL : allo->as_AllocateArray();
795 800 }
796 801 };
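And the array-specific variant, under the same assumptions:

    AllocateArrayNode* ary = AllocateArrayNode::Ideal_array_allocation(ptr, phase);
    Node* len = (ary != NULL) ? ary->Ideal_length() : NULL;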
797 802
798 803 //------------------------------AbstractLockNode-----------------------------------
799 804 class AbstractLockNode: public CallNode {
800 805 private:
801 806 bool _eliminate; // indicates this lock can be safely eliminated
802 807 bool _coarsened; // indicates this lock was coarsened
803 808 #ifndef PRODUCT
804 809 NamedCounter* _counter;
805 810 #endif
806 811
807 812 protected:
808 813 // helper functions for lock elimination
809 814 //
810 815
811 816 bool find_matching_unlock(const Node* ctrl, LockNode* lock,
812 817 GrowableArray<AbstractLockNode*> &lock_ops);
813 818 bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
814 819 GrowableArray<AbstractLockNode*> &lock_ops);
815 820 bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
816 821 GrowableArray<AbstractLockNode*> &lock_ops);
817 822 LockNode *find_matching_lock(UnlockNode* unlock);
818 823
819 824
820 825 public:
821 826 AbstractLockNode(const TypeFunc *tf)
822 827 : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
823 828 _coarsened(false),
824 829 _eliminate(false)
825 830 {
826 831 #ifndef PRODUCT
827 832 _counter = NULL;
828 833 #endif
829 834 }
830 835 virtual int Opcode() const = 0;
831 836 Node * obj_node() const {return in(TypeFunc::Parms + 0); }
832 837 Node * box_node() const {return in(TypeFunc::Parms + 1); }
833 838 Node * fastlock_node() const {return in(TypeFunc::Parms + 2); }
834 839 const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;}
835 840
836 841 virtual uint size_of() const { return sizeof(*this); }
837 842
838 843 bool is_eliminated() {return _eliminate; }
839 844 // mark node as eliminated and update the counter if there is one
840 845 void set_eliminated();
841 846
842 847 bool is_coarsened() { return _coarsened; }
843 848 void set_coarsened() { _coarsened = true; }
844 849
845 850 // locking does not modify its arguments
846 851 virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase){ return false;}
847 852
848 853 #ifndef PRODUCT
849 854 void create_lock_counter(JVMState* s);
850 855 NamedCounter* counter() const { return _counter; }
851 856 #endif
852 857 };
853 858
854 859 //------------------------------Lock---------------------------------------
855 860 // High-level lock operation
856 861 //
857 862 // This is a subclass of CallNode because it is a macro node which gets expanded
858 863 // into a code sequence containing a call. This node takes 3 "parameters":
859 864 // 0 - object to lock
860 865 // 1 - a BoxLockNode
861 866 // 2 - a FastLockNode
862 867 //
863 868 class LockNode : public AbstractLockNode {
864 869 public:
865 870
866 871 static const TypeFunc *lock_type() {
867 872 // create input type (domain)
868 873 const Type **fields = TypeTuple::fields(3);
869 874 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
870 875 fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // Address of stack location for lock
871 876 fields[TypeFunc::Parms+2] = TypeInt::BOOL; // FastLock
872 877 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);
873 878
874 879 // create result type (range)
875 880 fields = TypeTuple::fields(0);
876 881
877 882 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);
878 883
879 884 return TypeFunc::make(domain,range);
880 885 }
881 886
882 887 virtual int Opcode() const;
883 888 virtual uint size_of() const; // Size is bigger
884 889 LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
885 890 init_class_id(Class_Lock);
886 891 init_flags(Flag_is_macro);
887 892 C->add_macro_node(this);
888 893 }
889 894 virtual bool guaranteed_safepoint() { return false; }
890 895
891 896 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
892 897 // Expansion modifies the JVMState, so we need to clone it
893 898 virtual void clone_jvms() {
894 899 set_jvms(jvms()->clone_deep(Compile::current()));
895 900 }
896 901 };
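A sketch of how the three lock "parameters" listed above are reached through the AbstractLockNode accessors; `lock` is a hypothetical node:

    Node* obj = lock->obj_node();       // TypeFunc::Parms + 0: object to lock
    Node* box = lock->box_node();       // TypeFunc::Parms + 1: the BoxLockNode
    Node* flk = lock->fastlock_node();  // TypeFunc::Parms + 2: the FastLockNode
    if (lock->is_eliminated()) {
      // macro expansion can drop the runtime call entirely
    }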
897 902
898 903 //------------------------------Unlock---------------------------------------
899 904 // High-level unlock operation
900 905 class UnlockNode : public AbstractLockNode {
901 906 public:
902 907 virtual int Opcode() const;
903 908 virtual uint size_of() const; // Size is bigger
904 909 UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
905 910 init_class_id(Class_Unlock);
906 911 init_flags(Flag_is_macro);
907 912 C->add_macro_node(this);
908 913 }
909 914 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
910 915 // unlock is never a safepoint
911 916 virtual bool guaranteed_safepoint() { return false; }
912 917 };
(346 lines elided)