--- old/src/share/vm/opto/graphKit.hpp
+++ new/src/share/vm/opto/graphKit.hpp
1 1 /*
2 2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_OPTO_GRAPHKIT_HPP
26 26 #define SHARE_VM_OPTO_GRAPHKIT_HPP
27 27
28 28 #include "ci/ciEnv.hpp"
29 29 #include "ci/ciMethodData.hpp"
30 30 #include "opto/addnode.hpp"
31 31 #include "opto/callnode.hpp"
32 32 #include "opto/cfgnode.hpp"
33 33 #include "opto/compile.hpp"
34 34 #include "opto/divnode.hpp"
35 35 #include "opto/mulnode.hpp"
36 36 #include "opto/phaseX.hpp"
37 37 #include "opto/subnode.hpp"
38 38 #include "opto/type.hpp"
39 39 #include "runtime/deoptimization.hpp"
40 40
41 41 class FastLockNode;
42 42 class FastUnlockNode;
43 43 class IdealKit;
44 44 class LibraryCallKit;
45 45 class Parse;
46 46 class RootNode;
47 47
48 48 //-----------------------------------------------------------------------------
49 49 //----------------------------GraphKit-----------------------------------------
50 50 // Toolkit for building the common sorts of subgraphs.
51 51 // Does not know about bytecode parsing or type-flow results.
52 52 // It is able to create graphs implementing the semantics of most
53 53 // or all bytecodes, so that it can expand intrinsics and calls.
54 54 // It may depend on JVMState structure, but it must not depend
55 55 // on specific bytecode streams.
56 56 class GraphKit : public Phase {
57 57 friend class PreserveJVMState;
58 58
59 59 protected:
60 60 ciEnv* _env; // Compilation environment
61 61 PhaseGVN &_gvn; // Some optimizations while parsing
62 62 SafePointNode* _map; // Parser map from JVM to Nodes
63 63 SafePointNode* _exceptions;// Parser map(s) for exception state(s)
64 64 int _bci; // JVM Bytecode Pointer
65 65 ciMethod* _method; // JVM Current Method
66 66
67 67 private:
68 68 int _sp; // JVM Expression Stack Pointer; don't modify directly!
69 69
70 70 private:
71 71 SafePointNode* map_not_null() const {
72 72 assert(_map != NULL, "must call stopped() to test for reset compiler map");
73 73 return _map;
74 74 }
75 75
76 76 public:
77 77 GraphKit(); // empty constructor
78 78 GraphKit(JVMState* jvms); // the JVM state on which to operate
79 79
80 80 #ifdef ASSERT
81 81 ~GraphKit() {
82 82 assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
83 83 }
84 84 #endif
85 85
86 86 virtual Parse* is_Parse() const { return NULL; }
87 87 virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }
88 88
89 89 ciEnv* env() const { return _env; }
90 90 PhaseGVN& gvn() const { return _gvn; }
91 91
92 92 void record_for_igvn(Node* n) const { C->record_for_igvn(n); } // delegate to Compile
93 93
94 94 // Handy well-known nodes:
95 95 Node* null() const { return zerocon(T_OBJECT); }
96 96 Node* top() const { return C->top(); }
97 97 RootNode* root() const { return C->root(); }
98 98
99 99 // Create or find a constant node
100 100 Node* intcon(jint con) const { return _gvn.intcon(con); }
101 101 Node* longcon(jlong con) const { return _gvn.longcon(con); }
102 102 Node* makecon(const Type *t) const { return _gvn.makecon(t); }
103 103 Node* zerocon(BasicType bt) const { return _gvn.zerocon(bt); }
104 104 // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)
105 105
106 106 // Helper for byte_map_base
107 107 Node* byte_map_base_node() {
108 108 // Get base of card map
109 109 CardTableModRefBS* ct = (CardTableModRefBS*)(Universe::heap()->barrier_set());
110 110 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust users of this code");
111 111 if (ct->byte_map_base != NULL) {
112 112 return makecon(TypeRawPtr::make((address)ct->byte_map_base));
113 113 } else {
114 114 return null();
115 115 }
116 116 }
117 117
118 118 jint find_int_con(Node* n, jint value_if_unknown) {
119 119 return _gvn.find_int_con(n, value_if_unknown);
120 120 }
121 121 jlong find_long_con(Node* n, jlong value_if_unknown) {
122 122 return _gvn.find_long_con(n, value_if_unknown);
123 123 }
124 124 // (See also macro find_intptr_t_con in type.hpp, which uses one of these.)
125 125
126 126 // JVM State accessors:
127 127 // Parser mapping from JVM indices into Nodes.
128 128 // Low slots are accessed by the StartNode::enum.
129 129 // Then come the locals at StartNode::Parms to StartNode::Parms+max_locals();
130 130 // Then come JVM stack slots.
131 131 // Finally come the monitors, if any.
132 132 // See layout accessors in class JVMState.
133 133
134 134 SafePointNode* map() const { return _map; }
135 135 bool has_exceptions() const { return _exceptions != NULL; }
136 136 JVMState* jvms() const { return map_not_null()->_jvms; }
137 137 int sp() const { return _sp; }
138 138 int bci() const { return _bci; }
139 139 Bytecodes::Code java_bc() const;
140 140 ciMethod* method() const { return _method; }
141 141
142 142 void set_jvms(JVMState* jvms) { set_map(jvms->map());
143 143 assert(jvms == this->jvms(), "sanity");
144 144 _sp = jvms->sp();
145 145 _bci = jvms->bci();
146 146 _method = jvms->has_method() ? jvms->method() : NULL; }
147 147 void set_map(SafePointNode* m) { _map = m; debug_only(verify_map()); }
148 148 void set_sp(int sp) { assert(sp >= 0, err_msg_res("sp must be non-negative: %d", sp)); _sp = sp; }
149 149 void clean_stack(int from_sp); // clear garbage beyond from_sp to top
150 150
151 151 void inc_sp(int i) { set_sp(sp() + i); }
152 152 void dec_sp(int i) { set_sp(sp() - i); }
153 153 void set_bci(int bci) { _bci = bci; }
154 154
155 155 // Make sure jvms has current bci & sp.
156 156 JVMState* sync_jvms() const;
157 157 JVMState* sync_jvms_for_reexecute();
158 158
159 159 #ifdef ASSERT
160 160 // Make sure JVMS has an updated copy of bci and sp.
161 161 // Also sanity-check method, depth, and monitor depth.
162 162 bool jvms_in_sync() const;
163 163
164 164 // Make sure the map looks OK.
165 165 void verify_map() const;
166 166
167 167 // Make sure a proposed exception state looks OK.
168 168 static void verify_exception_state(SafePointNode* ex_map);
169 169 #endif
170 170
171 171 // Clone the existing map state. (Implements PreserveJVMState.)
172 172 SafePointNode* clone_map();
173 173
174 174 // Set the map to a clone of the given one.
175 175 void set_map_clone(SafePointNode* m);
176 176
177 177 // Tell if the compilation is failing.
178 178 bool failing() const { return C->failing(); }
179 179
180 180 // Set _map to NULL, signalling a stop to further bytecode execution.
181 181 // Preserve the map intact for future use, and return it back to the caller.
182 182 SafePointNode* stop() { SafePointNode* m = map(); set_map(NULL); return m; }
183 183
184 184 // Stop, but first smash the map's inputs to NULL, to mark it dead.
185 185 void stop_and_kill_map();
186 186
187 187 // Tell if _map is NULL, or control is top.
188 188 bool stopped();
189 189
190 190 // Tell if this method or any caller method has exception handlers.
191 191 bool has_ex_handler();
192 192
193 193 // Save an exception without blowing stack contents or other JVM state.
194 194 // (The extra pointer is stuck with add_req on the map, beyond the JVMS.)
195 195 static void set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop);
196 196
197 197 // Recover a saved exception from its map.
198 198 static Node* saved_ex_oop(SafePointNode* ex_map);
199 199
200 200 // Recover a saved exception from its map, and remove it from the map.
201 201 static Node* clear_saved_ex_oop(SafePointNode* ex_map);
202 202
203 203 #ifdef ASSERT
204 204 // Recover a saved exception from its map, and remove it from the map.
205 205 static bool has_saved_ex_oop(SafePointNode* ex_map);
206 206 #endif
207 207
208 208 // Push an exception in the canonical position for handlers (stack(0)).
209 209 void push_ex_oop(Node* ex_oop) {
210 210 ensure_stack(1); // ensure room to push the exception
211 211 set_stack(0, ex_oop);
212 212 set_sp(1);
213 213 clean_stack(1);
214 214 }
215 215
216 216 // Detach and return an exception state.
217 217 SafePointNode* pop_exception_state() {
218 218 SafePointNode* ex_map = _exceptions;
219 219 if (ex_map != NULL) {
220 220 _exceptions = ex_map->next_exception();
221 221 ex_map->set_next_exception(NULL);
222 222 debug_only(verify_exception_state(ex_map));
223 223 }
224 224 return ex_map;
225 225 }
226 226
227 227 // Add an exception, using the given JVM state, without commoning.
228 228 void push_exception_state(SafePointNode* ex_map) {
229 229 debug_only(verify_exception_state(ex_map));
230 230 ex_map->set_next_exception(_exceptions);
231 231 _exceptions = ex_map;
232 232 }
233 233
234 234 // Turn the current JVM state into an exception state, appending the ex_oop.
235 235 SafePointNode* make_exception_state(Node* ex_oop);
236 236
237 237 // Add an exception, using the given JVM state.
238 238 // Combine all exceptions with a common exception type into a single state.
239 239 // (This is done via combine_exception_states.)
240 240 void add_exception_state(SafePointNode* ex_map);
241 241
242 242 // Combine all exceptions of any sort whatever into a single master state.
243 243 SafePointNode* combine_and_pop_all_exception_states() {
244 244 if (_exceptions == NULL) return NULL;
245 245 SafePointNode* phi_map = pop_exception_state();
246 246 SafePointNode* ex_map;
247 247 while ((ex_map = pop_exception_state()) != NULL) {
248 248 combine_exception_states(ex_map, phi_map);
249 249 }
250 250 return phi_map;
251 251 }
252 252
253 253 // Combine the two exception states, building phis as necessary.
254 254 // The second argument is updated to include contributions from the first.
255 255 void combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map);
256 256
257 257 // Reset the map to the given state. If there are any half-finished phis
258 258 // in it (created by combine_exception_states), transform them now.
259 259 // Returns the exception oop. (Caller must call push_ex_oop if required.)
260 260 Node* use_exception_state(SafePointNode* ex_map);
261 261
262 262 // Collect exceptions from a given JVM state into my exception list.
263 263 void add_exception_states_from(JVMState* jvms);
264 264
265 265 // Collect all raised exceptions into the current JVM state.
266 266 // Clear the current exception list and map, returns the combined states.
267 267 JVMState* transfer_exceptions_into_jvms();
268 268
269 269 // Helper to throw a built-in exception.
270 270 // Range checks take the offending index.
271 271 // Cast and array store checks take the offending class.
272 272 // Others do not take the optional argument.
273 273 // The JVMS must allow the bytecode to be re-executed
274 274 // via an uncommon trap.
275 275 void builtin_throw(Deoptimization::DeoptReason reason, Node* arg = NULL);
276 276
277 277 // Helper to check the JavaThread::_should_post_on_exceptions flag
278 278 // and branch to an uncommon_trap if it is true (with the specified reason and must_throw)
279 279 void uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
280 280 bool must_throw) ;
281 281
282 282 // Helper Functions for adding debug information
283 283 void kill_dead_locals();
284 284 #ifdef ASSERT
285 285 bool dead_locals_are_killed();
286 286 #endif
287 287 // The call may deoptimize. Supply required JVM state as debug info.
288 288 // If must_throw is true, the call is guaranteed not to return normally.
289 289 void add_safepoint_edges(SafePointNode* call,
290 290 bool must_throw = false);
291 291
292 292 // How many stack inputs does the current BC consume?
293 293 // And, how does the stack change after the bytecode?
294 294 // Returns false if unknown.
295 295 bool compute_stack_effects(int& inputs, int& depth);
296 296
297 297 // Add a fixed offset to a pointer
298 298 Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
299 299 return basic_plus_adr(base, ptr, MakeConX(offset));
300 300 }
301 301 Node* basic_plus_adr(Node* base, intptr_t offset) {
302 302 return basic_plus_adr(base, base, MakeConX(offset));
303 303 }
304 304 // Add a variable offset to a pointer
305 305 Node* basic_plus_adr(Node* base, Node* offset) {
306 306 return basic_plus_adr(base, base, offset);
307 307 }
308 308 Node* basic_plus_adr(Node* base, Node* ptr, Node* offset);
309 309
310 310
311 311 // Some convenient shortcuts for common nodes
312 312 Node* IfTrue(IfNode* iff) { return _gvn.transform(new (C) IfTrueNode(iff)); }
313 313 Node* IfFalse(IfNode* iff) { return _gvn.transform(new (C) IfFalseNode(iff)); }
314 314
315 315 Node* AddI(Node* l, Node* r) { return _gvn.transform(new (C) AddINode(l, r)); }
316 316 Node* SubI(Node* l, Node* r) { return _gvn.transform(new (C) SubINode(l, r)); }
317 317 Node* MulI(Node* l, Node* r) { return _gvn.transform(new (C) MulINode(l, r)); }
318 318 Node* DivI(Node* ctl, Node* l, Node* r) { return _gvn.transform(new (C) DivINode(ctl, l, r)); }
319 319
320 320 Node* AndI(Node* l, Node* r) { return _gvn.transform(new (C) AndINode(l, r)); }
321 321 Node* OrI(Node* l, Node* r) { return _gvn.transform(new (C) OrINode(l, r)); }
322 322 Node* XorI(Node* l, Node* r) { return _gvn.transform(new (C) XorINode(l, r)); }
323 323
324 324 Node* MaxI(Node* l, Node* r) { return _gvn.transform(new (C) MaxINode(l, r)); }
325 325 Node* MinI(Node* l, Node* r) { return _gvn.transform(new (C) MinINode(l, r)); }
326 326
327 327 Node* LShiftI(Node* l, Node* r) { return _gvn.transform(new (C) LShiftINode(l, r)); }
328 328 Node* RShiftI(Node* l, Node* r) { return _gvn.transform(new (C) RShiftINode(l, r)); }
329 329 Node* URShiftI(Node* l, Node* r) { return _gvn.transform(new (C) URShiftINode(l, r)); }
330 330
331 331 Node* CmpI(Node* l, Node* r) { return _gvn.transform(new (C) CmpINode(l, r)); }
332 332 Node* CmpL(Node* l, Node* r) { return _gvn.transform(new (C) CmpLNode(l, r)); }
333 333 Node* CmpP(Node* l, Node* r) { return _gvn.transform(new (C) CmpPNode(l, r)); }
334 334 Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new (C) BoolNode(cmp, relop)); }
335 335
336 336 Node* AddP(Node* b, Node* a, Node* o) { return _gvn.transform(new (C) AddPNode(b, a, o)); }
337 337
338 338 // Convert between int and long, and size_t.
339 339 // (See macros ConvI2X, etc., in type.hpp for ConvI2X, etc.)
340 340 Node* ConvI2L(Node* offset);
341 + Node* ConvI2UL(Node* offset);
341 342 Node* ConvL2I(Node* offset);
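
The one functional change in this diff is the ConvI2UL declaration above: unlike ConvI2L, which sign-extends, it widens a 32-bit input to 64 bits as an unsigned value. A minimal sketch of such a helper, assuming the usual ConvI2LNode/AndLNode idiom (the changeset's actual graphKit.cpp body may differ):

    // Sketch only: zero-extend an int to an unsigned long by masking
    // off the bits introduced by sign extension; fold constants early.
    Node* GraphKit::ConvI2UL(Node* offset) {
      juint offset_con = (juint) find_int_con(offset, Type::OffsetBot);
      if (offset_con != (juint) Type::OffsetBot) {
        return longcon((julong) offset_con);   // known constant: fold now
      }
      Node* conv = _gvn.transform(new (C) ConvI2LNode(offset));
      Node* mask = _gvn.transform(ConLNode::make(C, (julong) max_juint));
      return _gvn.transform(new (C) AndLNode(conv, mask));
    }
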
342 343 // Find out the klass of an object.
343 344 Node* load_object_klass(Node* object);
344 345 // Find out the length of an array.
345 346 Node* load_array_length(Node* array);
346 347
347 348
348 349 // Helper function to do a NULL pointer check or ZERO check based on type.
349 350 // Throw an exception if a given value is null.
350 351 // Return the value cast to not-null.
351 352 // Be clever about equivalent dominating null checks.
352 353 Node* null_check_common(Node* value, BasicType type,
353 354 bool assert_null = false, Node* *null_control = NULL);
354 355 Node* null_check(Node* value, BasicType type = T_OBJECT) {
355 356 return null_check_common(value, type);
356 357 }
357 358 Node* null_check_receiver() {
358 359 assert(argument(0)->bottom_type()->isa_ptr(), "must be");
359 360 return null_check(argument(0));
360 361 }
361 362 Node* zero_check_int(Node* value) {
362 363 assert(value->bottom_type()->basic_type() == T_INT,
363 364 err_msg_res("wrong type: %s", type2name(value->bottom_type()->basic_type())));
364 365 return null_check_common(value, T_INT);
365 366 }
366 367 Node* zero_check_long(Node* value) {
367 368 assert(value->bottom_type()->basic_type() == T_LONG,
368 369 err_msg_res("wrong type: %s", type2name(value->bottom_type()->basic_type())));
369 370 return null_check_common(value, T_LONG);
370 371 }
371 372 // Throw an uncommon trap if a given value is __not__ null.
372 373 // Return the value cast to null, and be clever about dominating checks.
373 374 Node* null_assert(Node* value, BasicType type = T_OBJECT) {
374 375 return null_check_common(value, type, true);
375 376 }
376 377
377 378 // Null check oop. Return null-path control into (*null_control).
378 379 // Return a cast-not-null node which depends on the not-null control.
379 380 // If never_see_null, use an uncommon trap (*null_control sees a top).
380 381 // The cast is not valid along the null path; keep a copy of the original.
381 382 // If safe_for_replace, then we can replace the value with the cast
382 383 // in the parsing map (the cast is guaranteed to dominate the map)
383 384 Node* null_check_oop(Node* value, Node* *null_control,
384 385 bool never_see_null = false, bool safe_for_replace = false);
385 386
386 387 // Check the null_seen bit.
387 388 bool seems_never_null(Node* obj, ciProfileData* data);
388 389
389 390 // Check for unique class for receiver at call
390 391 ciKlass* profile_has_unique_klass() {
391 392 ciCallProfile profile = method()->call_profile_at_bci(bci());
392 393 if (profile.count() >= 0 && // no cast failures here
393 394 profile.has_receiver(0) &&
394 395 profile.morphism() == 1) {
395 396 return profile.receiver(0);
396 397 }
397 398 return NULL;
398 399 }
399 400
400 401 // record type from profiling with the type system
401 402 Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls);
402 403 Node* record_profiled_receiver_for_speculation(Node* n);
403 404 void record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc);
404 405 void record_profiled_parameters_for_speculation();
405 406
406 407 // Use the type profile to narrow an object type.
407 408 Node* maybe_cast_profiled_receiver(Node* not_null_obj,
408 409 ciKlass* require_klass,
409 410 ciKlass* spec,
410 411 bool safe_for_replace);
411 412
412 413 // Cast obj to type and emit guard unless we had too many traps here already
413 414 Node* maybe_cast_profiled_obj(Node* obj,
414 415 ciKlass* type,
415 416 bool not_null = false);
416 417
417 418 // Cast obj to not-null on this path
418 419 Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
419 420 // Replace all occurrences of one node by another.
420 421 void replace_in_map(Node* old, Node* neww);
421 422
422 423 void push(Node* n) { map_not_null(); _map->set_stack(_map->_jvms, _sp++ , n); }
423 424 Node* pop() { map_not_null(); return _map->stack( _map->_jvms, --_sp ); }
424 425 Node* peek(int off = 0) { map_not_null(); return _map->stack( _map->_jvms, _sp - off - 1 ); }
425 426
426 427 void push_pair(Node* ldval) {
427 428 push(ldval);
428 429 push(top()); // the halfword is merely a placeholder
429 430 }
430 431 void push_pair_local(int i) {
431 432 // longs are stored in locals in "push" order
432 433 push( local(i+0) ); // the real value
433 434 assert(local(i+1) == top(), "");
434 435 push(top()); // halfword placeholder
435 436 }
436 437 Node* pop_pair() {
437 438 // the second half is pushed last & popped first; it contains exactly nothing
438 439 Node* halfword = pop();
439 440 assert(halfword == top(), "");
440 441 // the long bits are pushed first & popped last:
441 442 return pop();
442 443 }
443 444 void set_pair_local(int i, Node* lval) {
444 445 // longs are stored in locals as a value/half pair (like doubles)
445 446 set_local(i+0, lval);
446 447 set_local(i+1, top());
447 448 }
448 449
449 450 // Push the node, which may be zero, one, or two words.
450 451 void push_node(BasicType n_type, Node* n) {
451 452 int n_size = type2size[n_type];
452 453 if (n_size == 1) push( n ); // T_INT, ...
453 454 else if (n_size == 2) push_pair( n ); // T_DOUBLE, T_LONG
454 455 else { assert(n_size == 0, "must be T_VOID"); }
455 456 }
456 457
457 458 Node* pop_node(BasicType n_type) {
458 459 int n_size = type2size[n_type];
459 460 if (n_size == 1) return pop();
460 461 else if (n_size == 2) return pop_pair();
461 462 else return NULL;
462 463 }
463 464
464 465 Node* control() const { return map_not_null()->control(); }
465 466 Node* i_o() const { return map_not_null()->i_o(); }
466 467 Node* returnadr() const { return map_not_null()->returnadr(); }
467 468 Node* frameptr() const { return map_not_null()->frameptr(); }
468 469 Node* local(uint idx) const { map_not_null(); return _map->local( _map->_jvms, idx); }
469 470 Node* stack(uint idx) const { map_not_null(); return _map->stack( _map->_jvms, idx); }
470 471 Node* argument(uint idx) const { map_not_null(); return _map->argument( _map->_jvms, idx); }
471 472 Node* monitor_box(uint idx) const { map_not_null(); return _map->monitor_box(_map->_jvms, idx); }
472 473 Node* monitor_obj(uint idx) const { map_not_null(); return _map->monitor_obj(_map->_jvms, idx); }
473 474
474 475 void set_control (Node* c) { map_not_null()->set_control(c); }
475 476 void set_i_o (Node* c) { map_not_null()->set_i_o(c); }
476 477 void set_local(uint idx, Node* c) { map_not_null(); _map->set_local( _map->_jvms, idx, c); }
477 478 void set_stack(uint idx, Node* c) { map_not_null(); _map->set_stack( _map->_jvms, idx, c); }
478 479 void set_argument(uint idx, Node* c){ map_not_null(); _map->set_argument(_map->_jvms, idx, c); }
479 480 void ensure_stack(uint stk_size) { map_not_null(); _map->ensure_stack(_map->_jvms, stk_size); }
480 481
481 482 // Access unaliased memory
482 483 Node* memory(uint alias_idx);
483 484 Node* memory(const TypePtr *tp) { return memory(C->get_alias_index(tp)); }
484 485 Node* memory(Node* adr) { return memory(_gvn.type(adr)->is_ptr()); }
485 486
486 487 // Access immutable memory
487 488 Node* immutable_memory() { return C->immutable_memory(); }
488 489
489 490 // Set unaliased memory
490 491 void set_memory(Node* c, uint alias_idx) { merged_memory()->set_memory_at(alias_idx, c); }
491 492 void set_memory(Node* c, const TypePtr *tp) { set_memory(c,C->get_alias_index(tp)); }
492 493 void set_memory(Node* c, Node* adr) { set_memory(c,_gvn.type(adr)->is_ptr()); }
493 494
494 495 // Get the entire memory state (probably a MergeMemNode), and reset it
495 496 // (The resetting prevents somebody from using the dangling Node pointer.)
496 497 Node* reset_memory();
497 498
498 499 // Get the entire memory state, asserted to be a MergeMemNode.
499 500 MergeMemNode* merged_memory() {
500 501 Node* mem = map_not_null()->memory();
501 502 assert(mem->is_MergeMem(), "parse memory is always pre-split");
502 503 return mem->as_MergeMem();
503 504 }
504 505
505 506 // Set the entire memory state; produce a new MergeMemNode.
506 507 void set_all_memory(Node* newmem);
507 508
508 509 // Create a memory projection from the call, then set_all_memory.
509 510 void set_all_memory_call(Node* call, bool separate_io_proj = false);
510 511
511 512 // Create a LoadNode, reading from the parser's memory state.
512 513 // (Note: require_atomic_access is useful only with T_LONG.)
513 514 //
514 515 // We choose the unordered semantics by default because we have
515 516 // adapted the `do_put_xxx' and `do_get_xxx' procedures for the case
516 517 // of volatile fields.
517 518 Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
518 519 MemNode::MemOrd mo, bool require_atomic_access = false) {
519 520 // This version computes alias_index from bottom_type
520 521 return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
521 522 mo, require_atomic_access);
522 523 }
523 524 Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
524 525 MemNode::MemOrd mo, bool require_atomic_access = false) {
525 526 // This version computes alias_index from an address type
526 527 assert(adr_type != NULL, "use other make_load factory");
527 528 return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
528 529 mo, require_atomic_access);
529 530 }
530 531 // This is the base version which is given an alias index.
531 532 Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
532 533 MemNode::MemOrd mo, bool require_atomic_access = false);
533 534
534 535 // Create & transform a StoreNode and store the effect into the
535 536 // parser's memory state.
536 537 //
537 538 // We must ensure that stores of object references will be visible
538 539 // only after the object's initialization. So the clients of this
539 540 // procedure must indicate that the store requires `release'
540 541 // semantics, if the stored value is an object reference that might
541 542 // point to a new object and may become externally visible.
542 543 Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
543 544 const TypePtr* adr_type,
544 545 MemNode::MemOrd mo,
545 546 bool require_atomic_access = false) {
546 547 // This version computes alias_index from an address type
547 548 assert(adr_type != NULL, "use other store_to_memory factory");
548 549 return store_to_memory(ctl, adr, val, bt,
549 550 C->get_alias_index(adr_type),
550 551 mo, require_atomic_access);
551 552 }
552 553 // This is the base version which is given alias index
553 554 // Return the new StoreXNode
554 555 Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
555 556 int adr_idx,
556 557 MemNode::MemOrd,
557 558 bool require_atomic_access = false);
558 559
559 560
560 561 // All in one pre-barrier, store, post_barrier
561 562 // Insert a write-barrier'd store. This is to let generational GC
562 563 // work; we have to flag all oop-stores before the next GC point.
563 564 //
564 565 // It comes in 3 flavors of store to an object, array, or unknown.
565 566 // We use precise card marks for arrays to avoid scanning the entire
566 567 // array. We use imprecise for object. We use precise for unknown
567 568 // since we don't know if we have an array or an object or even
568 569 // where the object starts.
569 570 //
570 571 // If val==NULL, it is taken to be a completely unknown value. QQQ
571 572
572 573 Node* store_oop(Node* ctl,
573 574 Node* obj, // containing obj
574 575 Node* adr, // actual address to store val at
575 576 const TypePtr* adr_type,
576 577 Node* val,
577 578 const TypeOopPtr* val_type,
578 579 BasicType bt,
579 580 bool use_precise,
580 581 MemNode::MemOrd mo);
581 582
582 583 Node* store_oop_to_object(Node* ctl,
583 584 Node* obj, // containing obj
584 585 Node* adr, // actual address to store val at
585 586 const TypePtr* adr_type,
586 587 Node* val,
587 588 const TypeOopPtr* val_type,
588 589 BasicType bt,
589 590 MemNode::MemOrd mo) {
590 591 return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false, mo);
591 592 }
592 593
593 594 Node* store_oop_to_array(Node* ctl,
594 595 Node* obj, // containing obj
595 596 Node* adr, // actual address to store val at
596 597 const TypePtr* adr_type,
597 598 Node* val,
598 599 const TypeOopPtr* val_type,
599 600 BasicType bt,
600 601 MemNode::MemOrd mo) {
601 602 return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo);
602 603 }
603 604
604 605 // Could be an array or object we don't know at compile time (unsafe ref.)
605 606 Node* store_oop_to_unknown(Node* ctl,
606 607 Node* obj, // containing obj
607 608 Node* adr, // actual address to store val at
608 609 const TypePtr* adr_type,
609 610 Node* val,
610 611 BasicType bt,
611 612 MemNode::MemOrd mo);
612 613
613 614 // For the few case where the barriers need special help
614 615 void pre_barrier(bool do_load, Node* ctl,
615 616 Node* obj, Node* adr, uint adr_idx, Node* val, const TypeOopPtr* val_type,
616 617 Node* pre_val,
617 618 BasicType bt);
618 619
619 620 void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
620 621 Node* val, BasicType bt, bool use_precise);
621 622
622 623 // Return addressing for an array element.
623 624 Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
624 625 // Optional constraint on the array size:
625 626 const TypeInt* sizetype = NULL);
626 627
627 628 // Return a load of array element at idx.
628 629 Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);
629 630
630 631 //---------------- Dtrace support --------------------
631 632 void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
632 633 void make_dtrace_method_entry(ciMethod* method) {
633 634 make_dtrace_method_entry_exit(method, true);
634 635 }
635 636 void make_dtrace_method_exit(ciMethod* method) {
636 637 make_dtrace_method_entry_exit(method, false);
637 638 }
638 639
639 640 //--------------- stub generation -------------------
640 641 public:
641 642 void gen_stub(address C_function,
642 643 const char *name,
643 644 int is_fancy_jump,
644 645 bool pass_tls,
645 646 bool return_pc);
646 647
647 648 //---------- help for generating calls --------------
648 649
649 650 // Do a null check on the receiver as it would happen before the call to
650 651 // callee (with all arguments still on the stack).
651 652 Node* null_check_receiver_before_call(ciMethod* callee) {
652 653 assert(!callee->is_static(), "must be a virtual method");
653 654 const int nargs = callee->arg_size();
654 655 inc_sp(nargs);
655 656 Node* n = null_check_receiver();
656 657 dec_sp(nargs);
657 658 return n;
658 659 }
659 660
660 661 // Fill in argument edges for the call from argument(0), argument(1), ...
661 662 // (The next step is to call set_edges_for_java_call.)
662 663 void set_arguments_for_java_call(CallJavaNode* call);
663 664
664 665 // Fill in non-argument edges for the call.
665 666 // Transform the call, and update the basics: control, i_o, memory.
666 667 // (The next step is usually to call set_results_for_java_call.)
667 668 void set_edges_for_java_call(CallJavaNode* call,
668 669 bool must_throw = false, bool separate_io_proj = false);
669 670
670 671 // Finish up a java call that was started by set_edges_for_java_call.
671 672 // Call add_exception on any throw arising from the call.
672 673 // Return the call result (transformed).
673 674 Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false);
674 675
675 676 // Similar to set_edges_for_java_call, but simplified for runtime calls.
676 677 void set_predefined_output_for_runtime_call(Node* call) {
677 678 set_predefined_output_for_runtime_call(call, NULL, NULL);
678 679 }
679 680 void set_predefined_output_for_runtime_call(Node* call,
680 681 Node* keep_mem,
681 682 const TypePtr* hook_mem);
682 683 Node* set_predefined_input_for_runtime_call(SafePointNode* call);
683 684
684 685 // Replace the call with the current state of the kit. Requires
685 686 // that the call was generated with separate io_projs so that
686 687 // exceptional control flow can be handled properly.
687 688 void replace_call(CallNode* call, Node* result);
688 689
689 690 // helper functions for statistics
690 691 void increment_counter(address counter_addr); // increment a debug counter
691 692 void increment_counter(Node* counter_addr); // increment a debug counter
692 693
693 694 // Bail out to the interpreter right now
694 695 // The optional klass is the one causing the trap.
695 696 // The optional reason is debug information written to the compile log.
696 697 // Optional must_throw is the same as with add_safepoint_edges.
697 698 void uncommon_trap(int trap_request,
698 699 ciKlass* klass = NULL, const char* reason_string = NULL,
699 700 bool must_throw = false, bool keep_exact_action = false);
700 701
701 702 // Shorthand, to avoid saying "Deoptimization::" so many times.
702 703 void uncommon_trap(Deoptimization::DeoptReason reason,
703 704 Deoptimization::DeoptAction action,
704 705 ciKlass* klass = NULL, const char* reason_string = NULL,
705 706 bool must_throw = false, bool keep_exact_action = false) {
706 707 uncommon_trap(Deoptimization::make_trap_request(reason, action),
707 708 klass, reason_string, must_throw, keep_exact_action);
708 709 }
709 710
710 711 // SP when bytecode needs to be reexecuted.
711 712 virtual int reexecute_sp() { return sp(); }
712 713
713 714 // Report if there were too many traps at the current method and bci.
714 715 // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
715 716 // If there is no MDO at all, report no trap unless told to assume it.
716 717 bool too_many_traps(Deoptimization::DeoptReason reason) {
717 718 return C->too_many_traps(method(), bci(), reason);
718 719 }
719 720
720 721 // Report if there were too many recompiles at the current method and bci.
721 722 bool too_many_recompiles(Deoptimization::DeoptReason reason) {
722 723 return C->too_many_recompiles(method(), bci(), reason);
723 724 }
724 725
725 726 // Returns the object (if any) which was created the moment before.
726 727 Node* just_allocated_object(Node* current_control);
727 728
728 729 static bool use_ReduceInitialCardMarks() {
729 730 return (ReduceInitialCardMarks
730 731 && Universe::heap()->can_elide_tlab_store_barriers());
731 732 }
732 733
733 734 // Sync Ideal and Graph kits.
734 735 void sync_kit(IdealKit& ideal);
735 736 void final_sync(IdealKit& ideal);
736 737
737 738 // vanilla/CMS post barrier
738 739 void write_barrier_post(Node *store, Node* obj,
739 740 Node* adr, uint adr_idx, Node* val, bool use_precise);
740 741
741 742 // Allow reordering of pre-barrier with oop store and/or post-barrier.
742 743 // Used for load_store operations which loads old value.
743 744 bool can_move_pre_barrier() const;
744 745
745 746 // G1 pre/post barriers
746 747 void g1_write_barrier_pre(bool do_load,
747 748 Node* obj,
748 749 Node* adr,
749 750 uint alias_idx,
750 751 Node* val,
751 752 const TypeOopPtr* val_type,
752 753 Node* pre_val,
753 754 BasicType bt);
754 755
755 756 void g1_write_barrier_post(Node* store,
756 757 Node* obj,
757 758 Node* adr,
758 759 uint alias_idx,
759 760 Node* val,
760 761 BasicType bt,
761 762 bool use_precise);
762 763 // Helper function for g1
763 764 private:
764 765 void g1_mark_card(IdealKit& ideal, Node* card_adr, Node* store, uint oop_alias_idx,
765 766 Node* index, Node* index_adr,
766 767 Node* buffer, const TypeFunc* tf);
767 768
768 769 public:
769 770 // Helper function to round double arguments before a call
770 771 void round_double_arguments(ciMethod* dest_method);
771 772 void round_double_result(ciMethod* dest_method);
772 773
773 774 // rounding for strict float precision conformance
774 775 Node* precision_rounding(Node* n);
775 776
776 777 // rounding for strict double precision conformance
777 778 Node* dprecision_rounding(Node* n);
778 779
779 780 // rounding for non-strict double stores
780 781 Node* dstore_rounding(Node* n);
781 782
782 783 // Helper functions for fast/slow path codes
783 784 Node* opt_iff(Node* region, Node* iff);
784 785 Node* make_runtime_call(int flags,
785 786 const TypeFunc* call_type, address call_addr,
786 787 const char* call_name,
787 788 const TypePtr* adr_type, // NULL if no memory effects
788 789 Node* parm0 = NULL, Node* parm1 = NULL,
789 790 Node* parm2 = NULL, Node* parm3 = NULL,
790 791 Node* parm4 = NULL, Node* parm5 = NULL,
791 792 Node* parm6 = NULL, Node* parm7 = NULL);
792 793 enum { // flag values for make_runtime_call
793 794 RC_NO_FP = 1, // CallLeafNoFPNode
794 795 RC_NO_IO = 2, // do not hook IO edges
795 796 RC_NO_LEAF = 4, // CallStaticJavaNode
796 797 RC_MUST_THROW = 8, // flag passed to add_safepoint_edges
797 798 RC_NARROW_MEM = 16, // input memory is same as output
798 799 RC_UNCOMMON = 32, // freq. expected to be like uncommon trap
799 800 RC_LEAF = 0 // null value: no flags set
800 801 };
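
The RC_ flags are bit values meant to be or-ed together. As an illustrative sketch only (call_type, call_addr, adr_type, and the parm arguments are stand-in names, not identifiers from this file), a leaf call that must not clobber FP registers and touches a single memory slice would look like:

    // Sketch: the capitalized RC_ flags below are real; the lowercase
    // arguments are assumed placeholders for the caller's values.
    Node* call = make_runtime_call(RC_LEAF | RC_NO_FP | RC_NARROW_MEM,
                                   call_type,   // const TypeFunc* of the stub
                                   call_addr,   // address of the C function
                                   "my_leaf_helper",
                                   adr_type,    // the one slice read/written
                                   parm0, parm1);
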
801 802
802 803 // merge in all memory slices from new_mem, along the given path
803 804 void merge_memory(Node* new_mem, Node* region, int new_path);
804 805 void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj);
805 806
806 807 // Helper functions to build synchronizations
807 808 int next_monitor();
808 809 Node* insert_mem_bar(int opcode, Node* precedent = NULL);
809 810 Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = NULL);
810 811 // Optional 'precedent' is appended as an extra edge, to force ordering.
811 812 FastLockNode* shared_lock(Node* obj);
812 813 void shared_unlock(Node* box, Node* obj);
813 814
814 815 // helper functions for the fast path/slow path idioms
815 816 Node* fast_and_slow(Node* in, const Type *result_type, Node* null_result, IfNode* fast_test, Node* fast_result, address slow_call, const TypeFunc *slow_call_type, Node* slow_arg, Klass* ex_klass, Node* slow_result);
816 817
817 818 // Generate an instance-of idiom. Used by both the instance-of bytecode
818 819 // and the reflective instance-of call.
819 820 Node* gen_instanceof(Node *subobj, Node* superkls, bool safe_for_replace = false);
820 821
821 822 // Generate a check-cast idiom. Used by both the check-cast bytecode
822 823 // and the array-store bytecode
823 824 Node* gen_checkcast( Node *subobj, Node* superkls,
824 825 Node* *failure_control = NULL );
825 826
826 827 // Generate a subtyping check. Takes as input the subtype and supertype.
827 828 // Returns 2 values: sets the default control() to the true path and
828 829 // returns the false path. Only reads from constant memory taken from the
829 830 // default memory; does not write anything. It also doesn't take in an
830 831 // Object; if you wish to check an Object you need to load the Object's
831 832 // class prior to coming here.
832 833 Node* gen_subtype_check(Node* subklass, Node* superklass);
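
Since gen_subtype_check leaves control() on the success path and returns the failure path, callers typically capture that failure control and divert it, for example into an uncommon trap. A sketch under that reading (subklass and superklass are placeholders):

    // Sketch: after the call, control() is the is-a-subtype path.
    Node* not_subtype_ctrl = gen_subtype_check(subklass, superklass);
    if (not_subtype_ctrl != top()) {          // failure path is reachable
      PreserveJVMState pjvms(this);           // protect the main map/sp
      set_control(not_subtype_ctrl);
      uncommon_trap(Deoptimization::Reason_class_check,
                    Deoptimization::Action_maybe_recompile);
    }
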
833 834
834 835 // Static parse-time type checking logic for gen_subtype_check:
835 836 enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
836 837 int static_subtype_check(ciKlass* superk, ciKlass* subk);
837 838
838 839 // Exact type check used for predicted calls and casts.
839 840 // Rewrites (*casted_receiver) to be casted to the stronger type.
840 841 // (Caller is responsible for doing replace_in_map.)
841 842 Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
842 843 Node* *casted_receiver);
843 844
844 845 // implementation of object creation
845 846 Node* set_output_for_allocation(AllocateNode* alloc,
846 847 const TypeOopPtr* oop_type);
847 848 Node* get_layout_helper(Node* klass_node, jint& constant_value);
848 849 Node* new_instance(Node* klass_node,
849 850 Node* slow_test = NULL,
850 851 Node* *return_size_val = NULL);
851 852 Node* new_array(Node* klass_node, Node* count_val, int nargs,
852 853 Node* *return_size_val = NULL);
853 854
854 855 // java.lang.String helpers
855 856 Node* load_String_offset(Node* ctrl, Node* str);
856 857 Node* load_String_length(Node* ctrl, Node* str);
857 858 Node* load_String_value(Node* ctrl, Node* str);
858 859 void store_String_offset(Node* ctrl, Node* str, Node* value);
859 860 void store_String_length(Node* ctrl, Node* str, Node* value);
860 861 void store_String_value(Node* ctrl, Node* str, Node* value);
861 862
862 863 // Handy for making control flow
863 864 IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
864 865 IfNode* iff = new (C) IfNode(ctrl, tst, prob, cnt);// New IfNode's
865 866 _gvn.set_type(iff, iff->Value(&_gvn)); // Value may be known at parse-time
866 867 // Place 'if' on worklist if it will be in graph
867 868 if (!tst->is_Con()) record_for_igvn(iff); // Range-check and Null-check removal is later
868 869 return iff;
869 870 }
870 871
871 872 IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
872 873 IfNode* iff = new (C) IfNode(ctrl, tst, prob, cnt);// New IfNode's
873 874 _gvn.transform(iff); // Value may be known at parse-time
874 875 // Place 'if' on worklist if it will be in graph
875 876 if (!tst->is_Con()) record_for_igvn(iff); // Range-check and Null-check removal is later
876 877 return iff;
877 878 }
878 879
879 880 // Insert a loop predicate into the graph
880 881 void add_predicate(int nargs = 0);
881 882 void add_predicate_impl(Deoptimization::DeoptReason reason, int nargs);
882 883
883 884 // Produce new array node of stable type
884 885 Node* cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type);
885 886 };
886 887
887 888 // Helper class to support building of control flow branches. Upon
888 889 // creation the map and sp at bci are cloned and restored upon de-
889 890 // struction. Typical use:
890 891 //
891 892 // { PreserveJVMState pjvms(this);
892 893 // // code of new branch
893 894 // }
894 895 // // here the JVM state at bci is established
895 896
896 897 class PreserveJVMState: public StackObj {
897 898 protected:
898 899 GraphKit* _kit;
899 900 #ifdef ASSERT
900 901 int _block; // PO of current block, if a Parse
901 902 int _bci;
902 903 #endif
903 904 SafePointNode* _map;
904 905 uint _sp;
905 906
906 907 public:
907 908 PreserveJVMState(GraphKit* kit, bool clone_map = true);
908 909 ~PreserveJVMState();
909 910 };
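
Putting these pieces together: the typical way to fork control flow is to build an If, construct one arm inside a PreserveJVMState scope, and let the kit's main flow continue on the other arm. A sketch (the test node bol and the 0.99f probability are assumed inputs):

    // Sketch: two-way branch; the pjvms scope restores map and sp when
    // the unlikely arm is done, so the main flow is undisturbed.
    IfNode* iff = create_and_map_if(control(), bol, 0.99f, COUNT_UNKNOWN);
    Node* likely_ctl   = IfTrue(iff);
    Node* unlikely_ctl = IfFalse(iff);
    { PreserveJVMState pjvms(this);
      set_control(unlikely_ctl);
      // ... build the unlikely path here ...
    }
    set_control(likely_ctl);    // main flow resumes on the likely path
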
910 911
911 912 // Helper class to build cutouts of the form if (p) ; else {x...}.
912 913 // The code {x...} must not fall through.
913 914 // The kit's main flow of control is set to the "then" continuation of if(p).
914 915 class BuildCutout: public PreserveJVMState {
915 916 public:
916 917 BuildCutout(GraphKit* kit, Node* p, float prob, float cnt = COUNT_UNKNOWN);
917 918 ~BuildCutout();
918 919 };
919 920
920 921 // Helper class to preserve the original _reexecute bit and _sp and restore
921 922 // them back
922 923 class PreserveReexecuteState: public StackObj {
923 924 protected:
924 925 GraphKit* _kit;
925 926 uint _sp;
926 927 JVMState::ReexecuteState _reexecute;
927 928
928 929 public:
929 930 PreserveReexecuteState(GraphKit* kit);
930 931 ~PreserveReexecuteState();
931 932 };
932 933
933 934 #endif // SHARE_VM_OPTO_GRAPHKIT_HPP