rev 2237 : [mq]: initial-intrinsification-changes
rev 2238 : [mq]: code-review-comments-vladimir
--- old/src/share/vm/opto/graphKit.hpp
+++ new/src/share/vm/opto/graphKit.hpp
1 1 /*
2 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
2 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_OPTO_GRAPHKIT_HPP
26 26 #define SHARE_VM_OPTO_GRAPHKIT_HPP
27 27
28 28 #include "ci/ciEnv.hpp"
29 29 #include "ci/ciMethodData.hpp"
30 30 #include "opto/addnode.hpp"
31 31 #include "opto/callnode.hpp"
32 32 #include "opto/cfgnode.hpp"
33 33 #include "opto/compile.hpp"
34 34 #include "opto/divnode.hpp"
35 35 #include "opto/mulnode.hpp"
36 36 #include "opto/phaseX.hpp"
37 37 #include "opto/subnode.hpp"
38 38 #include "opto/type.hpp"
39 39 #include "runtime/deoptimization.hpp"
40 40
41 41 class FastLockNode;
42 42 class FastUnlockNode;
43 43 class IdealKit;
44 44 class Parse;
45 45 class RootNode;
46 46
47 47 //-----------------------------------------------------------------------------
48 48 //----------------------------GraphKit-----------------------------------------
49 49 // Toolkit for building the common sorts of subgraphs.
50 50 // Does not know about bytecode parsing or type-flow results.
51 51 // It is able to create graphs implementing the semantics of most
52 52 // or all bytecodes, so that it can expand intrinsics and calls.
53 53 // It may depend on JVMState structure, but it must not depend
54 54 // on specific bytecode streams.
55 55 class GraphKit : public Phase {
56 56 friend class PreserveJVMState;
57 57
58 58 protected:
59 59 ciEnv* _env; // Compilation environment
60 60 PhaseGVN &_gvn; // Some optimizations while parsing
61 61 SafePointNode* _map; // Parser map from JVM to Nodes
62 62 SafePointNode* _exceptions;// Parser map(s) for exception state(s)
63 63 int _sp; // JVM Expression Stack Pointer
64 64 int _bci; // JVM Bytecode Pointer
65 65 ciMethod* _method; // JVM Current Method
66 66
67 67 private:
68 68 SafePointNode* map_not_null() const {
69 69 assert(_map != NULL, "must call stopped() to test for reset compiler map");
70 70 return _map;
71 71 }
72 72
73 73 public:
74 74 GraphKit(); // empty constructor
75 75 GraphKit(JVMState* jvms); // the JVM state on which to operate
76 76
77 77 #ifdef ASSERT
78 78 ~GraphKit() {
79 79 assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
80 80 }
81 81 #endif
82 82
83 83 virtual Parse* is_Parse() const { return NULL; }
84 84
85 85 ciEnv* env() const { return _env; }
86 86 PhaseGVN& gvn() const { return _gvn; }
87 87
88 88 void record_for_igvn(Node* n) const { C->record_for_igvn(n); } // delegate to Compile
89 89
90 90 // Handy well-known nodes:
91 91 Node* null() const { return zerocon(T_OBJECT); }
92 92 Node* top() const { return C->top(); }
93 93 RootNode* root() const { return C->root(); }
94 94
95 95 // Create or find a constant node
96 96 Node* intcon(jint con) const { return _gvn.intcon(con); }
97 97 Node* longcon(jlong con) const { return _gvn.longcon(con); }
98 98 Node* makecon(const Type *t) const { return _gvn.makecon(t); }
99 99 Node* zerocon(BasicType bt) const { return _gvn.zerocon(bt); }
100 100 // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)
101 101
102 102 // Helper for byte_map_base
103 103 Node* byte_map_base_node() {
104 104 // Get base of card map
105 105 CardTableModRefBS* ct = (CardTableModRefBS*)(Universe::heap()->barrier_set());
106 106 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust users of this code");
107 107 if (ct->byte_map_base != NULL) {
108 108 return makecon(TypeRawPtr::make((address)ct->byte_map_base));
109 109 } else {
110 110 return null();
111 111 }
112 112 }
113 113
114 114 jint find_int_con(Node* n, jint value_if_unknown) {
115 115 return _gvn.find_int_con(n, value_if_unknown);
116 116 }
117 117 jlong find_long_con(Node* n, jlong value_if_unknown) {
118 118 return _gvn.find_long_con(n, value_if_unknown);
119 119 }
120 120 // (See also macro find_intptr_t_con in type.hpp, which uses one of these.)
121 121
122 122 // JVM State accessors:
123 123 // Parser mapping from JVM indices into Nodes.
124 124 // Low slots are accessed by the StartNode::enum.
125 125 // Then come the locals at StartNode::Parms to StartNode::Parms+max_locals();
126 126 // Then come JVM stack slots.
127 127 // Finally come the monitors, if any.
128 128 // See layout accessors in class JVMState.
129 129
130 130 SafePointNode* map() const { return _map; }
131 131 bool has_exceptions() const { return _exceptions != NULL; }
132 132 JVMState* jvms() const { return map_not_null()->_jvms; }
133 133 int sp() const { return _sp; }
134 134 int bci() const { return _bci; }
135 135 Bytecodes::Code java_bc() const;
136 136 ciMethod* method() const { return _method; }
137 137
138 138 void set_jvms(JVMState* jvms) { set_map(jvms->map());
139 139 assert(jvms == this->jvms(), "sanity");
140 140 _sp = jvms->sp();
141 141 _bci = jvms->bci();
142 142 _method = jvms->has_method() ? jvms->method() : NULL; }
143 143 void set_map(SafePointNode* m) { _map = m; debug_only(verify_map()); }
144 144 void set_sp(int i) { assert(i >= 0, "must be non-negative"); _sp = i; }
145 145 void clean_stack(int from_sp); // clear garbage beyond from_sp to top
146 146
147 147 void inc_sp(int i) { set_sp(sp() + i); }
148 148 void set_bci(int bci) { _bci = bci; }
149 149
150 150 // Make sure jvms has current bci & sp.
151 151 JVMState* sync_jvms() const;
152 152 #ifdef ASSERT
153 153 // Make sure JVMS has an updated copy of bci and sp.
154 154 // Also sanity-check method, depth, and monitor depth.
155 155 bool jvms_in_sync() const;
156 156
157 157 // Make sure the map looks OK.
158 158 void verify_map() const;
159 159
160 160 // Make sure a proposed exception state looks OK.
161 161 static void verify_exception_state(SafePointNode* ex_map);
162 162 #endif
163 163
164 164 // Clone the existing map state. (Implements PreserveJVMState.)
165 165 SafePointNode* clone_map();
166 166
167 167 // Set the map to a clone of the given one.
168 168 void set_map_clone(SafePointNode* m);
169 169
170 170 // Tell if the compilation is failing.
171 171 bool failing() const { return C->failing(); }
172 172
173 173 // Set _map to NULL, signalling a stop to further bytecode execution.
 174  174    // Preserve the map intact for future use, and return it to the caller.
175 175 SafePointNode* stop() { SafePointNode* m = map(); set_map(NULL); return m; }
176 176
177 177 // Stop, but first smash the map's inputs to NULL, to mark it dead.
178 178 void stop_and_kill_map();
179 179
180 180 // Tell if _map is NULL, or control is top.
181 181 bool stopped();
182 182
183 183 // Tell if this method or any caller method has exception handlers.
184 184 bool has_ex_handler();
185 185
186 186 // Save an exception without blowing stack contents or other JVM state.
187 187 // (The extra pointer is stuck with add_req on the map, beyond the JVMS.)
188 188 static void set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop);
189 189
190 190 // Recover a saved exception from its map.
191 191 static Node* saved_ex_oop(SafePointNode* ex_map);
192 192
193 193 // Recover a saved exception from its map, and remove it from the map.
194 194 static Node* clear_saved_ex_oop(SafePointNode* ex_map);
195 195
196 196 #ifdef ASSERT
197 197 // Recover a saved exception from its map, and remove it from the map.
198 198 static bool has_saved_ex_oop(SafePointNode* ex_map);
199 199 #endif
200 200
201 201 // Push an exception in the canonical position for handlers (stack(0)).
202 202 void push_ex_oop(Node* ex_oop) {
203 203 ensure_stack(1); // ensure room to push the exception
204 204 set_stack(0, ex_oop);
205 205 set_sp(1);
206 206 clean_stack(1);
207 207 }
208 208
209 209 // Detach and return an exception state.
210 210 SafePointNode* pop_exception_state() {
211 211 SafePointNode* ex_map = _exceptions;
212 212 if (ex_map != NULL) {
213 213 _exceptions = ex_map->next_exception();
214 214 ex_map->set_next_exception(NULL);
215 215 debug_only(verify_exception_state(ex_map));
216 216 }
217 217 return ex_map;
218 218 }
219 219
220 220 // Add an exception, using the given JVM state, without commoning.
221 221 void push_exception_state(SafePointNode* ex_map) {
222 222 debug_only(verify_exception_state(ex_map));
223 223 ex_map->set_next_exception(_exceptions);
224 224 _exceptions = ex_map;
225 225 }
226 226
227 227 // Turn the current JVM state into an exception state, appending the ex_oop.
228 228 SafePointNode* make_exception_state(Node* ex_oop);
229 229
230 230 // Add an exception, using the given JVM state.
231 231 // Combine all exceptions with a common exception type into a single state.
232 232 // (This is done via combine_exception_states.)
233 233 void add_exception_state(SafePointNode* ex_map);
234 234
235 235 // Combine all exceptions of any sort whatever into a single master state.
236 236 SafePointNode* combine_and_pop_all_exception_states() {
237 237 if (_exceptions == NULL) return NULL;
238 238 SafePointNode* phi_map = pop_exception_state();
239 239 SafePointNode* ex_map;
240 240 while ((ex_map = pop_exception_state()) != NULL) {
241 241 combine_exception_states(ex_map, phi_map);
242 242 }
243 243 return phi_map;
244 244 }
245 245
246 246 // Combine the two exception states, building phis as necessary.
247 247 // The second argument is updated to include contributions from the first.
248 248 void combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map);
249 249
250 250 // Reset the map to the given state. If there are any half-finished phis
251 251 // in it (created by combine_exception_states), transform them now.
252 252 // Returns the exception oop. (Caller must call push_ex_oop if required.)
253 253 Node* use_exception_state(SafePointNode* ex_map);
254 254
255 255 // Collect exceptions from a given JVM state into my exception list.
256 256 void add_exception_states_from(JVMState* jvms);
257 257
258 258 // Collect all raised exceptions into the current JVM state.
 259  259    // Clear the current exception list and map, and return the combined states.
260 260 JVMState* transfer_exceptions_into_jvms();
261 261
262 262 // Helper to throw a built-in exception.
263 263 // Range checks take the offending index.
264 264 // Cast and array store checks take the offending class.
265 265 // Others do not take the optional argument.
266 266 // The JVMS must allow the bytecode to be re-executed
267 267 // via an uncommon trap.
268 268 void builtin_throw(Deoptimization::DeoptReason reason, Node* arg = NULL);
269 269
270 270 // Helper to check the JavaThread::_should_post_on_exceptions flag
271 271 // and branch to an uncommon_trap if it is true (with the specified reason and must_throw)
272 272 void uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
 273  273                                                   bool must_throw);
274 274
275 275 // Helper Functions for adding debug information
276 276 void kill_dead_locals();
277 277 #ifdef ASSERT
278 278 bool dead_locals_are_killed();
279 279 #endif
280 280 // The call may deoptimize. Supply required JVM state as debug info.
281 281 // If must_throw is true, the call is guaranteed not to return normally.
282 282 void add_safepoint_edges(SafePointNode* call,
283 283 bool must_throw = false);
284 284
285 285 // How many stack inputs does the current BC consume?
286 286 // And, how does the stack change after the bytecode?
287 287 // Returns false if unknown.
288 288 bool compute_stack_effects(int& inputs, int& depth);
289 289
290 290 // Add a fixed offset to a pointer
291 291 Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
292 292 return basic_plus_adr(base, ptr, MakeConX(offset));
293 293 }
294 294 Node* basic_plus_adr(Node* base, intptr_t offset) {
295 295 return basic_plus_adr(base, base, MakeConX(offset));
296 296 }
297 297 // Add a variable offset to a pointer
298 298 Node* basic_plus_adr(Node* base, Node* offset) {
299 299 return basic_plus_adr(base, base, offset);
300 300 }
301 301 Node* basic_plus_adr(Node* base, Node* ptr, Node* offset);
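
A usage sketch only (obj, ary, and offset_node are hypothetical nodes; this assumes a live GraphKit with a valid map) showing how the overloads compose:

    Node* field_adr = basic_plus_adr(obj, 16);          // constant offset; base doubles as ptr
    Node* elem_adr  = basic_plus_adr(ary, offset_node); // variable offset; base doubles as ptr
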
302 302
303 303
304 304 // Some convenient shortcuts for common nodes
305 305 Node* IfTrue(IfNode* iff) { return _gvn.transform(new (C,1) IfTrueNode(iff)); }
306 306 Node* IfFalse(IfNode* iff) { return _gvn.transform(new (C,1) IfFalseNode(iff)); }
307 307
308 308 Node* AddI(Node* l, Node* r) { return _gvn.transform(new (C,3) AddINode(l, r)); }
309 309 Node* SubI(Node* l, Node* r) { return _gvn.transform(new (C,3) SubINode(l, r)); }
310 310 Node* MulI(Node* l, Node* r) { return _gvn.transform(new (C,3) MulINode(l, r)); }
311 311 Node* DivI(Node* ctl, Node* l, Node* r) { return _gvn.transform(new (C,3) DivINode(ctl, l, r)); }
312 312
313 313 Node* AndI(Node* l, Node* r) { return _gvn.transform(new (C,3) AndINode(l, r)); }
314 314 Node* OrI(Node* l, Node* r) { return _gvn.transform(new (C,3) OrINode(l, r)); }
315 315 Node* XorI(Node* l, Node* r) { return _gvn.transform(new (C,3) XorINode(l, r)); }
316 316
317 317 Node* MaxI(Node* l, Node* r) { return _gvn.transform(new (C,3) MaxINode(l, r)); }
318 318 Node* MinI(Node* l, Node* r) { return _gvn.transform(new (C,3) MinINode(l, r)); }
319 319
320 320 Node* LShiftI(Node* l, Node* r) { return _gvn.transform(new (C,3) LShiftINode(l, r)); }
321 321 Node* RShiftI(Node* l, Node* r) { return _gvn.transform(new (C,3) RShiftINode(l, r)); }
322 322 Node* URShiftI(Node* l, Node* r) { return _gvn.transform(new (C,3) URShiftINode(l, r)); }
323 323
324 324 Node* CmpI(Node* l, Node* r) { return _gvn.transform(new (C,3) CmpINode(l, r)); }
325 325 Node* CmpL(Node* l, Node* r) { return _gvn.transform(new (C,3) CmpLNode(l, r)); }
326 326 Node* CmpP(Node* l, Node* r) { return _gvn.transform(new (C,3) CmpPNode(l, r)); }
327 327 Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new (C,2) BoolNode(cmp, relop)); }
328 328
329 329 Node* AddP(Node* b, Node* a, Node* o) { return _gvn.transform(new (C,4) AddPNode(b, a, o)); }
330 330
331 331 // Convert between int and long, and size_t.
 332  332    // (See macros ConvI2X, etc., in type.hpp.)
333 333 Node* ConvI2L(Node* offset);
334 334 Node* ConvL2I(Node* offset);
335 335 // Find out the klass of an object.
336 336 Node* load_object_klass(Node* object);
337 337 // Find out the length of an array.
338 338 Node* load_array_length(Node* array);
339 339 // Helper function to do a NULL pointer check or ZERO check based on type.
340 340 Node* null_check_common(Node* value, BasicType type,
341 341 bool assert_null, Node* *null_control);
342 342 // Throw an exception if a given value is null.
343 343 // Return the value cast to not-null.
344 344 // Be clever about equivalent dominating null checks.
345 345 Node* do_null_check(Node* value, BasicType type) {
346 346 return null_check_common(value, type, false, NULL);
347 347 }
348 348 // Throw an uncommon trap if a given value is __not__ null.
349 349 // Return the value cast to null, and be clever about dominating checks.
350 350 Node* do_null_assert(Node* value, BasicType type) {
351 351 return null_check_common(value, type, true, NULL);
352 352 }
353 353 // Null check oop. Return null-path control into (*null_control).
354 354 // Return a cast-not-null node which depends on the not-null control.
355 355 // If never_see_null, use an uncommon trap (*null_control sees a top).
356 356 // The cast is not valid along the null path; keep a copy of the original.
357 357 Node* null_check_oop(Node* value, Node* *null_control,
358 358 bool never_see_null = false);
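
A minimal sketch of the two-way result, assuming obj is a hypothetical oop-typed node:

    Node* null_ctl = top();
    Node* not_null = null_check_oop(obj, &null_ctl);
    // control() now follows the not-null path; null_ctl carries the null path
    // (or top(), if never_see_null caused an uncommon trap instead).
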
359 359
360 360 // Check the null_seen bit.
361 361 bool seems_never_null(Node* obj, ciProfileData* data);
362 362
363 363 // Use the type profile to narrow an object type.
364 364 Node* maybe_cast_profiled_receiver(Node* not_null_obj,
365 365 ciProfileData* data,
366 366 ciKlass* require_klass);
367 367
368 368 // Cast obj to not-null on this path
369 369 Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
370 370 // Replace all occurrences of one node by another.
371 371 void replace_in_map(Node* old, Node* neww);
372 372
373 373 void push(Node* n) { map_not_null(); _map->set_stack(_map->_jvms,_sp++,n); }
374 374 Node* pop() { map_not_null(); return _map->stack(_map->_jvms,--_sp); }
375 375 Node* peek(int off=0) { map_not_null(); return _map->stack(_map->_jvms, _sp - off - 1); }
376 376
377 377 void push_pair(Node* ldval) {
378 378 push(ldval);
379 379 push(top()); // the halfword is merely a placeholder
380 380 }
381 381 void push_pair_local(int i) {
382 382 // longs are stored in locals in "push" order
383 383 push( local(i+0) ); // the real value
384 384 assert(local(i+1) == top(), "");
385 385 push(top()); // halfword placeholder
386 386 }
387 387 Node* pop_pair() {
388 388 // the second half is pushed last & popped first; it contains exactly nothing
389 389 Node* halfword = pop();
390 390 assert(halfword == top(), "");
391 391 // the long bits are pushed first & popped last:
392 392 return pop();
393 393 }
394 394 void set_pair_local(int i, Node* lval) {
395 395 // longs are stored in locals as a value/half pair (like doubles)
396 396 set_local(i+0, lval);
397 397 set_local(i+1, top());
398 398 }
399 399
400 400 // Push the node, which may be zero, one, or two words.
401 401 void push_node(BasicType n_type, Node* n) {
402 402 int n_size = type2size[n_type];
403 403 if (n_size == 1) push( n ); // T_INT, ...
404 404 else if (n_size == 2) push_pair( n ); // T_DOUBLE, T_LONG
405 405 else { assert(n_size == 0, "must be T_VOID"); }
406 406 }
407 407
408 408 Node* pop_node(BasicType n_type) {
409 409 int n_size = type2size[n_type];
410 410 if (n_size == 1) return pop();
411 411 else if (n_size == 2) return pop_pair();
412 412 else return NULL;
413 413 }
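
A minimal sketch of the push/pop pairing, assuming lval is an existing long-typed node; two-word types occupy a value/half pair on the JVM stack:

    push_node(T_LONG, lval);      // pushes lval plus a top() halfword placeholder
    Node* v = pop_node(T_LONG);   // pops the placeholder first, then returns lval
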
414 414
415 415 Node* control() const { return map_not_null()->control(); }
416 416 Node* i_o() const { return map_not_null()->i_o(); }
417 417 Node* returnadr() const { return map_not_null()->returnadr(); }
418 418 Node* frameptr() const { return map_not_null()->frameptr(); }
419 419 Node* local(uint idx) const { map_not_null(); return _map->local( _map->_jvms, idx); }
420 420 Node* stack(uint idx) const { map_not_null(); return _map->stack( _map->_jvms, idx); }
421 421 Node* argument(uint idx) const { map_not_null(); return _map->argument( _map->_jvms, idx); }
422 422 Node* monitor_box(uint idx) const { map_not_null(); return _map->monitor_box(_map->_jvms, idx); }
423 423 Node* monitor_obj(uint idx) const { map_not_null(); return _map->monitor_obj(_map->_jvms, idx); }
424 424
425 425 void set_control (Node* c) { map_not_null()->set_control(c); }
426 426 void set_i_o (Node* c) { map_not_null()->set_i_o(c); }
427 427 void set_local(uint idx, Node* c) { map_not_null(); _map->set_local( _map->_jvms, idx, c); }
428 428 void set_stack(uint idx, Node* c) { map_not_null(); _map->set_stack( _map->_jvms, idx, c); }
429 429 void set_argument(uint idx, Node* c){ map_not_null(); _map->set_argument(_map->_jvms, idx, c); }
430 430 void ensure_stack(uint stk_size) { map_not_null(); _map->ensure_stack(_map->_jvms, stk_size); }
431 431
432 432 // Access unaliased memory
433 433 Node* memory(uint alias_idx);
434 434 Node* memory(const TypePtr *tp) { return memory(C->get_alias_index(tp)); }
435 435 Node* memory(Node* adr) { return memory(_gvn.type(adr)->is_ptr()); }
436 436
437 437 // Access immutable memory
438 438 Node* immutable_memory() { return C->immutable_memory(); }
439 439
440 440 // Set unaliased memory
441 441 void set_memory(Node* c, uint alias_idx) { merged_memory()->set_memory_at(alias_idx, c); }
442 442 void set_memory(Node* c, const TypePtr *tp) { set_memory(c,C->get_alias_index(tp)); }
443 443 void set_memory(Node* c, Node* adr) { set_memory(c,_gvn.type(adr)->is_ptr()); }
444 444
445 445 // Get the entire memory state (probably a MergeMemNode), and reset it
446 446 // (The resetting prevents somebody from using the dangling Node pointer.)
447 447 Node* reset_memory();
448 448
449 449 // Get the entire memory state, asserted to be a MergeMemNode.
450 450 MergeMemNode* merged_memory() {
451 451 Node* mem = map_not_null()->memory();
452 452 assert(mem->is_MergeMem(), "parse memory is always pre-split");
453 453 return mem->as_MergeMem();
454 454 }
455 455
456 456 // Set the entire memory state; produce a new MergeMemNode.
457 457 void set_all_memory(Node* newmem);
458 458
459 459 // Create a memory projection from the call, then set_all_memory.
460 460 void set_all_memory_call(Node* call, bool separate_io_proj = false);
461 461
462 462 // Create a LoadNode, reading from the parser's memory state.
463 463 // (Note: require_atomic_access is useful only with T_LONG.)
464 464 Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
465 465 bool require_atomic_access = false) {
466 466 // This version computes alias_index from bottom_type
467 467 return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
468 468 require_atomic_access);
469 469 }
470 470 Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type, bool require_atomic_access = false) {
471 471 // This version computes alias_index from an address type
472 472 assert(adr_type != NULL, "use other make_load factory");
473 473 return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
474 474 require_atomic_access);
475 475 }
476 476 // This is the base version which is given an alias index.
477 477 Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx, bool require_atomic_access = false);
478 478
479 479 // Create & transform a StoreNode and store the effect into the
480 480 // parser's memory state.
481 481 Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
482 482 const TypePtr* adr_type,
483 483 bool require_atomic_access = false) {
484 484 // This version computes alias_index from an address type
485 485 assert(adr_type != NULL, "use other store_to_memory factory");
486 486 return store_to_memory(ctl, adr, val, bt,
487 487 C->get_alias_index(adr_type),
488 488 require_atomic_access);
489 489 }
 490  490    // This is the base version which is given an alias index
491 491 // Return the new StoreXNode
492 492 Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
493 493 int adr_idx,
494 494 bool require_atomic_access = false);
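
A sketch of pairing the two (obj and the offset 16 are purely illustrative); the first make_load overload derives the alias index from the address's bottom type:

    Node* adr  = basic_plus_adr(obj, 16);                         // hypothetical field offset
    Node* oldv = make_load(control(), adr, TypeInt::INT, T_INT);  // read the current value
    Node* newv = AddI(oldv, intcon(1));                           // bump it
    store_to_memory(control(), adr, newv, T_INT, adr->bottom_type()->is_ptr());
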
495 495
496 496
 497  497    // All-in-one pre-barrier, store, post-barrier
498 498 // Insert a write-barrier'd store. This is to let generational GC
499 499 // work; we have to flag all oop-stores before the next GC point.
500 500 //
501 501 // It comes in 3 flavors of store to an object, array, or unknown.
502 502 // We use precise card marks for arrays to avoid scanning the entire
 503  503    // array.  We use imprecise for objects.  We use precise for unknown
 504  504    // since we don't know if we have an array or an object or even
505 505 // where the object starts.
506 506 //
507 507 // If val==NULL, it is taken to be a completely unknown value. QQQ
508 508
509 509 Node* store_oop(Node* ctl,
510 510 Node* obj, // containing obj
 511  511                    Node* adr,   // actual address to store val at
512 512 const TypePtr* adr_type,
513 513 Node* val,
514 514 const TypeOopPtr* val_type,
515 515 BasicType bt,
516 516 bool use_precise);
517 517
518 518 Node* store_oop_to_object(Node* ctl,
519 519 Node* obj, // containing obj
 520  520                              Node* adr,   // actual address to store val at
521 521 const TypePtr* adr_type,
522 522 Node* val,
523 523 const TypeOopPtr* val_type,
524 524 BasicType bt) {
525 525 return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false);
526 526 }
527 527
528 528 Node* store_oop_to_array(Node* ctl,
529 529 Node* obj, // containing obj
 530  530                             Node* adr,   // actual address to store val at
531 531 const TypePtr* adr_type,
532 532 Node* val,
533 533 const TypeOopPtr* val_type,
534 534 BasicType bt) {
535 535 return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true);
536 536 }
537 537
538 538 // Could be an array or object we don't know at compile time (unsafe ref.)
539 539 Node* store_oop_to_unknown(Node* ctl,
540 540 Node* obj, // containing obj
 541  541                               Node* adr,   // actual address to store val at
542 542 const TypePtr* adr_type,
543 543 Node* val,
544 544 BasicType bt);
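
A sketch of flavor selection (all node arguments hypothetical): precise card marks for array stores, imprecise for object fields, and the unknown variant when the destination could be either, e.g. an unsafe reference store:

    store_oop_to_object (control(), obj, adr, adr_type, val, val_type, T_OBJECT);
    store_oop_to_array  (control(), ary, adr, adr_type, val, val_type, T_OBJECT);
    store_oop_to_unknown(control(), dst, adr, adr_type, val, T_OBJECT);
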
545 545
 546  546    // For the few cases where the barriers need special help
547 - void pre_barrier(Node* ctl, Node* obj, Node* adr, uint adr_idx,
548 - Node* val, const TypeOopPtr* val_type, BasicType bt);
547 + void pre_barrier(bool do_load, Node* ctl,
548 + Node* obj, Node* adr, uint adr_idx, Node* val, const TypeOopPtr* val_type,
549 + Node* pre_val,
550 + BasicType bt);
549 551
550 552 void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
551 553 Node* val, BasicType bt, bool use_precise);
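
The new do_load/pre_val parameters appear to let a caller that already holds the previous value emit just the pre-barrier, without re-loading it. A sketch of that convention, inferred from the signature (pre_val is a hypothetical previously-loaded oop):

    pre_barrier(false /* do_load: pre_val supplied directly */, control(),
                NULL /* obj */, NULL /* adr */, max_juint /* adr_idx */,
                NULL /* val */, NULL /* val_type */,
                pre_val /* previously loaded value */,
                T_OBJECT);
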
552 554
553 555 // Return addressing for an array element.
554 556 Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
555 557 // Optional constraint on the array size:
556 558 const TypeInt* sizetype = NULL);
557 559
558 560 // Return a load of array element at idx.
559 561 Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);
560 562
561 563 //---------------- Dtrace support --------------------
562 564 void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
563 565 void make_dtrace_method_entry(ciMethod* method) {
564 566 make_dtrace_method_entry_exit(method, true);
565 567 }
566 568 void make_dtrace_method_exit(ciMethod* method) {
567 569 make_dtrace_method_entry_exit(method, false);
568 570 }
569 571
570 572 //--------------- stub generation -------------------
571 573 public:
572 574 void gen_stub(address C_function,
573 575 const char *name,
574 576 int is_fancy_jump,
575 577 bool pass_tls,
576 578 bool return_pc);
577 579
578 580 //---------- help for generating calls --------------
579 581
580 582 // Do a null check on the receiver, which is in argument(0).
581 583 Node* null_check_receiver(ciMethod* callee) {
582 584 assert(!callee->is_static(), "must be a virtual method");
583 585 int nargs = 1 + callee->signature()->size();
584 586 // Null check on self without removing any arguments. The argument
585 587 // null check technically happens in the wrong place, which can lead to
586 588 // invalid stack traces when the primitive is inlined into a method
587 589 // which handles NullPointerExceptions.
588 590 Node* receiver = argument(0);
589 591 _sp += nargs;
590 592 receiver = do_null_check(receiver, T_OBJECT);
591 593 _sp -= nargs;
592 594 return receiver;
593 595 }
594 596
595 597 // Fill in argument edges for the call from argument(0), argument(1), ...
596 598 // (The next step is to call set_edges_for_java_call.)
597 599 void set_arguments_for_java_call(CallJavaNode* call);
598 600
599 601 // Fill in non-argument edges for the call.
600 602 // Transform the call, and update the basics: control, i_o, memory.
601 603 // (The next step is usually to call set_results_for_java_call.)
602 604 void set_edges_for_java_call(CallJavaNode* call,
603 605 bool must_throw = false, bool separate_io_proj = false);
604 606
605 607 // Finish up a java call that was started by set_edges_for_java_call.
606 608 // Call add_exception on any throw arising from the call.
607 609 // Return the call result (transformed).
608 610 Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false);
609 611
610 612 // Similar to set_edges_for_java_call, but simplified for runtime calls.
611 613 void set_predefined_output_for_runtime_call(Node* call) {
612 614 set_predefined_output_for_runtime_call(call, NULL, NULL);
613 615 }
614 616 void set_predefined_output_for_runtime_call(Node* call,
615 617 Node* keep_mem,
616 618 const TypePtr* hook_mem);
617 619 Node* set_predefined_input_for_runtime_call(SafePointNode* call);
618 620
619 621 // Replace the call with the current state of the kit. Requires
620 622 // that the call was generated with separate io_projs so that
621 623 // exceptional control flow can be handled properly.
622 624 void replace_call(CallNode* call, Node* result);
623 625
624 626 // helper functions for statistics
625 627 void increment_counter(address counter_addr); // increment a debug counter
626 628 void increment_counter(Node* counter_addr); // increment a debug counter
627 629
628 630 // Bail out to the interpreter right now
629 631 // The optional klass is the one causing the trap.
630 632 // The optional reason is debug information written to the compile log.
631 633 // Optional must_throw is the same as with add_safepoint_edges.
632 634 void uncommon_trap(int trap_request,
633 635 ciKlass* klass = NULL, const char* reason_string = NULL,
634 636 bool must_throw = false, bool keep_exact_action = false);
635 637
636 638 // Shorthand, to avoid saying "Deoptimization::" so many times.
637 639 void uncommon_trap(Deoptimization::DeoptReason reason,
638 640 Deoptimization::DeoptAction action,
639 641 ciKlass* klass = NULL, const char* reason_string = NULL,
640 642 bool must_throw = false, bool keep_exact_action = false) {
641 643 uncommon_trap(Deoptimization::make_trap_request(reason, action),
642 644 klass, reason_string, must_throw, keep_exact_action);
643 645 }
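
For example, a minimal sketch of the shorthand form, using reason/action values from deoptimization.hpp:

    // Bail out and reinterpret if a path believed unreachable is ever taken.
    uncommon_trap(Deoptimization::Reason_unreached,
                  Deoptimization::Action_reinterpret);
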
644 646
645 647 // Report if there were too many traps at the current method and bci.
646 648 // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
647 649 // If there is no MDO at all, report no trap unless told to assume it.
648 650 bool too_many_traps(Deoptimization::DeoptReason reason) {
649 651 return C->too_many_traps(method(), bci(), reason);
650 652 }
651 653
652 654 // Report if there were too many recompiles at the current method and bci.
653 655 bool too_many_recompiles(Deoptimization::DeoptReason reason) {
654 656 return C->too_many_recompiles(method(), bci(), reason);
655 657 }
656 658
657 659 // Returns the object (if any) which was created the moment before.
658 660 Node* just_allocated_object(Node* current_control);
659 661
660 662 static bool use_ReduceInitialCardMarks() {
661 663 return (ReduceInitialCardMarks
662 664 && Universe::heap()->can_elide_tlab_store_barriers());
663 665 }
664 666
665 667 void sync_kit(IdealKit& ideal);
666 668
667 669 // vanilla/CMS post barrier
668 670 void write_barrier_post(Node *store, Node* obj,
669 671 Node* adr, uint adr_idx, Node* val, bool use_precise);
670 672
671 673 // G1 pre/post barriers
672 - void g1_write_barrier_pre(Node* obj,
674 + void g1_write_barrier_pre(bool do_load,
675 + Node* obj,
673 676 Node* adr,
674 677 uint alias_idx,
675 678 Node* val,
676 679 const TypeOopPtr* val_type,
680 + Node* pre_val,
677 681 BasicType bt);
678 682
679 683 void g1_write_barrier_post(Node* store,
680 684 Node* obj,
681 685 Node* adr,
682 686 uint alias_idx,
683 687 Node* val,
684 688 BasicType bt,
685 689 bool use_precise);
686 690 // Helper function for g1
687 691 private:
688 692 void g1_mark_card(IdealKit& ideal, Node* card_adr, Node* store, uint oop_alias_idx,
689 693 Node* index, Node* index_adr,
690 694 Node* buffer, const TypeFunc* tf);
691 695
692 696 public:
693 697 // Helper function to round double arguments before a call
694 698 void round_double_arguments(ciMethod* dest_method);
695 699 void round_double_result(ciMethod* dest_method);
696 700
697 701 // rounding for strict float precision conformance
698 702 Node* precision_rounding(Node* n);
699 703
700 704 // rounding for strict double precision conformance
701 705 Node* dprecision_rounding(Node* n);
702 706
703 707 // rounding for non-strict double stores
704 708 Node* dstore_rounding(Node* n);
705 709
706 710 // Helper functions for fast/slow path codes
707 711 Node* opt_iff(Node* region, Node* iff);
708 712 Node* make_runtime_call(int flags,
709 713 const TypeFunc* call_type, address call_addr,
710 714 const char* call_name,
711 715 const TypePtr* adr_type, // NULL if no memory effects
712 716 Node* parm0 = NULL, Node* parm1 = NULL,
713 717 Node* parm2 = NULL, Node* parm3 = NULL,
714 718 Node* parm4 = NULL, Node* parm5 = NULL,
715 719 Node* parm6 = NULL, Node* parm7 = NULL);
716 720 enum { // flag values for make_runtime_call
717 721 RC_NO_FP = 1, // CallLeafNoFPNode
718 722 RC_NO_IO = 2, // do not hook IO edges
719 723 RC_NO_LEAF = 4, // CallStaticJavaNode
720 724 RC_MUST_THROW = 8, // flag passed to add_safepoint_edges
721 725 RC_NARROW_MEM = 16, // input memory is same as output
722 726 RC_UNCOMMON = 32, // freq. expected to be like uncommon trap
723 727 RC_LEAF = 0 // null value: no flags set
724 728 };
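
A sketch of combining the flags (call_type, call_addr, and parm0 are assumed to already be in scope): a leaf call that kills no FP registers and hooks no IO edges:

    Node* call = make_runtime_call(RC_LEAF | RC_NO_FP | RC_NO_IO,
                                   call_type, call_addr, "example_leaf",
                                   NULL,    // adr_type: no memory effects assumed
                                   parm0);
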
725 729
726 730 // merge in all memory slices from new_mem, along the given path
727 731 void merge_memory(Node* new_mem, Node* region, int new_path);
728 732 void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj);
729 733
730 734 // Helper functions to build synchronizations
731 735 int next_monitor();
732 736 Node* insert_mem_bar(int opcode, Node* precedent = NULL);
733 737 Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = NULL);
734 738 // Optional 'precedent' is appended as an extra edge, to force ordering.
735 739 FastLockNode* shared_lock(Node* obj);
736 740 void shared_unlock(Node* box, Node* obj);
737 741
738 742 // helper functions for the fast path/slow path idioms
739 743 Node* fast_and_slow(Node* in, const Type *result_type, Node* null_result, IfNode* fast_test, Node* fast_result, address slow_call, const TypeFunc *slow_call_type, Node* slow_arg, klassOop ex_klass, Node* slow_result);
740 744
741 745 // Generate an instance-of idiom. Used by both the instance-of bytecode
742 746 // and the reflective instance-of call.
743 747 Node* gen_instanceof( Node *subobj, Node* superkls );
744 748
745 749 // Generate a check-cast idiom. Used by both the check-cast bytecode
746 750 // and the array-store bytecode
747 751 Node* gen_checkcast( Node *subobj, Node* superkls,
748 752 Node* *failure_control = NULL );
749 753
750 754 // Generate a subtyping check. Takes as input the subtype and supertype.
751 755 // Returns 2 values: sets the default control() to the true path and
752 756 // returns the false path. Only reads from constant memory taken from the
753 757 // default memory; does not write anything. It also doesn't take in an
754 758 // Object; if you wish to check an Object you need to load the Object's
755 759 // class prior to coming here.
756 760 Node* gen_subtype_check(Node* subklass, Node* superklass);
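
A sketch of the two-result convention (subklass and superklass are hypothetical klass nodes):

    Node* not_subtype_ctrl = gen_subtype_check(subklass, superklass);
    // control() now follows the "is a subtype" path;
    // not_subtype_ctrl is the failing path, e.g. for an uncommon trap.
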
757 761
758 762 // Static parse-time type checking logic for gen_subtype_check:
759 763 enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
760 764 int static_subtype_check(ciKlass* superk, ciKlass* subk);
761 765
762 766 // Exact type check used for predicted calls and casts.
763 767 // Rewrites (*casted_receiver) to be casted to the stronger type.
764 768 // (Caller is responsible for doing replace_in_map.)
765 769 Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
766 770 Node* *casted_receiver);
767 771
768 772 // implementation of object creation
769 773 Node* set_output_for_allocation(AllocateNode* alloc,
770 774 const TypeOopPtr* oop_type,
771 775 bool raw_mem_only);
772 776 Node* get_layout_helper(Node* klass_node, jint& constant_value);
773 777 Node* new_instance(Node* klass_node,
774 778 Node* slow_test = NULL,
775 779 bool raw_mem_only = false,
776 780 Node* *return_size_val = NULL);
777 781 Node* new_array(Node* klass_node, Node* count_val, int nargs,
778 782 bool raw_mem_only = false, Node* *return_size_val = NULL);
779 783
780 784 // Handy for making control flow
781 785 IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
782 786 IfNode* iff = new (C, 2) IfNode(ctrl, tst, prob, cnt);// New IfNode's
783 787 _gvn.set_type(iff, iff->Value(&_gvn)); // Value may be known at parse-time
784 788 // Place 'if' on worklist if it will be in graph
785 789 if (!tst->is_Con()) record_for_igvn(iff); // Range-check and Null-check removal is later
786 790 return iff;
787 791 }
788 792
789 793 IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
790 794 IfNode* iff = new (C, 2) IfNode(ctrl, tst, prob, cnt);// New IfNode's
791 795 _gvn.transform(iff); // Value may be known at parse-time
792 796 // Place 'if' on worklist if it will be in graph
793 797 if (!tst->is_Con()) record_for_igvn(iff); // Range-check and Null-check removal is later
794 798 return iff;
795 799 }
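
A sketch tying these together (lhs and rhs are hypothetical int nodes): build a comparison, wrap it in a Bool, materialize the If, and continue along the taken projection:

    Node*   cmp = CmpI(lhs, rhs);
    Node*   bol = Bool(cmp, BoolTest::lt);
    IfNode* iff = create_and_xform_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
    set_control(IfTrue(iff));   // proceed on the lhs < rhs path
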
796 800
797 801 // Insert a loop predicate into the graph
798 802 void add_predicate(int nargs = 0);
799 803 void add_predicate_impl(Deoptimization::DeoptReason reason, int nargs);
800 804 };
801 805
802 806 // Helper class to support building of control flow branches. Upon
 803  807  // creation the map and sp at bci are cloned and restored upon
 804  808  // destruction. Typical use:
805 809 //
806 810 // { PreserveJVMState pjvms(this);
807 811 // // code of new branch
808 812 // }
809 813 // // here the JVM state at bci is established
810 814
811 815 class PreserveJVMState: public StackObj {
812 816 protected:
813 817 GraphKit* _kit;
814 818 #ifdef ASSERT
815 819 int _block; // PO of current block, if a Parse
816 820 int _bci;
817 821 #endif
818 822 SafePointNode* _map;
819 823 uint _sp;
820 824
821 825 public:
822 826 PreserveJVMState(GraphKit* kit, bool clone_map = true);
823 827 ~PreserveJVMState();
824 828 };
825 829
826 830 // Helper class to build cutouts of the form if (p) ; else {x...}.
827 831 // The code {x...} must not fall through.
828 832 // The kit's main flow of control is set to the "then" continuation of if(p).
829 833 class BuildCutout: public PreserveJVMState {
830 834 public:
831 835 BuildCutout(GraphKit* kit, Node* p, float prob, float cnt = COUNT_UNKNOWN);
832 836 ~BuildCutout();
833 837 };
834 838
835 839 // Helper class to preserve the original _reexecute bit and _sp and restore
 836  840  // them on destruction
837 841 class PreserveReexecuteState: public StackObj {
838 842 protected:
839 843 GraphKit* _kit;
840 844 uint _sp;
841 845 JVMState::ReexecuteState _reexecute;
842 846
843 847 public:
844 848 PreserveReexecuteState(GraphKit* kit);
845 849 ~PreserveReexecuteState();
846 850 };
847 851
848 852 #endif // SHARE_VM_OPTO_GRAPHKIT_HPP