rev 7082 : 8068881: SIGBUS in C2 compiled method weblogic.wsee.jaxws.framework.jaxrpc.EnvironmentFactory$SimulatedWsdlDefinitions.<init>
Summary: Use MachMerge to hook together defs of the same multidef value in a block
Reviewed-by: kvn, vlivanov
--- old/hotspot/src/share/vm/opto/chaitin.cpp
+++ new/hotspot/src/share/vm/opto/chaitin.cpp
1 1 /*
2 2 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "compiler/compileLog.hpp"
27 27 #include "compiler/oopMap.hpp"
28 28 #include "memory/allocation.inline.hpp"
29 29 #include "opto/addnode.hpp"
30 30 #include "opto/block.hpp"
31 31 #include "opto/callnode.hpp"
32 32 #include "opto/cfgnode.hpp"
33 33 #include "opto/chaitin.hpp"
34 34 #include "opto/coalesce.hpp"
35 35 #include "opto/connode.hpp"
36 36 #include "opto/idealGraphPrinter.hpp"
37 37 #include "opto/indexSet.hpp"
38 38 #include "opto/machnode.hpp"
39 39 #include "opto/memnode.hpp"
40 40 #include "opto/opcodes.hpp"
41 41 #include "opto/rootnode.hpp"
42 42
43 43 #ifndef PRODUCT
44 44 void LRG::dump() const {
45 45 ttyLocker ttyl;
46 46 tty->print("%d ",num_regs());
47 47 _mask.dump();
48 48 if( _msize_valid ) {
49 49 if( mask_size() == compute_mask_size() ) tty->print(", #%d ",_mask_size);
50 50 else tty->print(", #!!!_%d_vs_%d ",_mask_size,_mask.Size());
51 51 } else {
52 52 tty->print(", #?(%d) ",_mask.Size());
53 53 }
54 54
55 55 tty->print("EffDeg: ");
56 56 if( _degree_valid ) tty->print( "%d ", _eff_degree );
57 57 else tty->print("? ");
58 58
59 59 if( is_multidef() ) {
60 60 tty->print("MultiDef ");
61 61 if (_defs != NULL) {
62 62 tty->print("(");
63 63 for (int i = 0; i < _defs->length(); i++) {
64 64 tty->print("N%d ", _defs->at(i)->_idx);
65 65 }
66 66 tty->print(") ");
67 67 }
68 68 }
69 69 else if( _def == 0 ) tty->print("Dead ");
70 70 else tty->print("Def: N%d ",_def->_idx);
71 71
72 72 tty->print("Cost:%4.2g Area:%4.2g Score:%4.2g ",_cost,_area, score());
73 73 // Flags
74 74 if( _is_oop ) tty->print("Oop ");
75 75 if( _is_float ) tty->print("Float ");
76 76 if( _is_vector ) tty->print("Vector ");
77 77 if( _was_spilled1 ) tty->print("Spilled ");
78 78 if( _was_spilled2 ) tty->print("Spilled2 ");
79 79 if( _direct_conflict ) tty->print("Direct_conflict ");
80 80 if( _fat_proj ) tty->print("Fat ");
81 81 if( _was_lo ) tty->print("Lo ");
82 82 if( _has_copy ) tty->print("Copy ");
83 83 if( _at_risk ) tty->print("Risk ");
84 84
85 85 if( _must_spill ) tty->print("Must_spill ");
86 86 if( _is_bound ) tty->print("Bound ");
87 87 if( _msize_valid ) {
88 88 if( _degree_valid && lo_degree() ) tty->print("Trivial ");
89 89 }
90 90
91 91 tty->cr();
92 92 }
93 93 #endif
94 94
95 95 // Compute score from cost and area. Low score is best to spill.
96 96 static double raw_score( double cost, double area ) {
97 97 return cost - (area*RegisterCostAreaRatio) * 1.52588e-5;
98 98 }
99 99
100 100 double LRG::score() const {
101 101 // Scale _area by RegisterCostAreaRatio/64K then subtract from cost.
102 102 // Bigger area lowers score, encourages spilling this live range.
103 103 // Bigger cost raises score, prevents spilling this live range.
104 104 // (Note: 1/65536 is the magic constant below; I don't trust the C optimizer
105 105 // to turn a divide by a constant into a multiply by the reciprocal).
106 106 double score = raw_score( _cost, _area);
107 107
108 108 // Account for area. Basically, LRGs covering large areas are better
109 109 // to spill because more other LRGs get freed up.
110 110 if( _area == 0.0 ) // No area? Then no progress to spill
111 111 return 1e35;
112 112
113 113 if( _was_spilled2 ) // If spilled once before, we are unlikely
114 114 return score + 1e30; // to make progress again.
115 115
116 116 if( _cost >= _area*3.0 ) // Tiny area relative to cost
117 117 return score + 1e17; // Probably no progress to spill
118 118
119 119 if( (_cost+_cost) >= _area*3.0 ) // Small area relative to cost
120 120 return score + 1e10; // Likely no progress to spill
121 121
122 122 return score;
123 123 }
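
To see how the thresholds above interact, here is a minimal standalone sketch (not part of chaitin.cpp; the value RegisterCostAreaRatio = 16000 is an assumption for illustration, and the helper name is hypothetical):

#include <cstdio>

// Sketch of the spill-score heuristic: low score = best spill candidate.
static double sketch_score(double cost, double area, bool was_spilled2) {
  const double RegisterCostAreaRatio = 16000.0;       // assumed platform value
  double score = cost - (area * RegisterCostAreaRatio) * 1.52588e-5; // 1/65536
  if (area == 0.0)              return 1e35;          // no area: no progress to spill
  if (was_spilled2)             return score + 1e30;  // already spilled hard
  if (cost >= area * 3.0)       return score + 1e17;  // tiny area relative to cost
  if (2.0 * cost >= area * 3.0) return score + 1e10;  // small area relative to cost
  return score;
}

int main() {
  // A long, cheap live range: the large area drives the score negative,
  // so it is chosen for spilling before a short, hot one.
  printf("%g\n", sketch_score(10.0, 5000.0, false));  // about -1210
  printf("%g\n", sketch_score(100.0, 10.0, false));   // about 1e17
  return 0;
}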
124 124
125 125 #define NUMBUCKS 3
126 126
127 127 // Straight out of Tarjan's union-find algorithm
128 128 uint LiveRangeMap::find_compress(uint lrg) {
129 129 uint cur = lrg;
130 130 uint next = _uf_map.at(cur);
131 131 while (next != cur) { // Scan chain of equivalences
132 132 assert( next < cur, "always union smaller");
133 133 cur = next; // until find a fixed-point
134 134 next = _uf_map.at(cur);
135 135 }
136 136
137 137 // Core of union-find algorithm: update chain of
138 138 // equivalences to be equal to the root.
139 139 while (lrg != next) {
140 140 uint tmp = _uf_map.at(lrg);
141 141 _uf_map.at_put(lrg, next);
142 142 lrg = tmp;
143 143 }
144 144 return lrg;
145 145 }
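
As a minimal illustration of the two-pass find above, the same algorithm over a plain std::vector (a sketch only; the GrowableArray-backed _uf_map is replaced by a vector):

#include <vector>
#include <cassert>

// Pass 1 chases the chain of equivalences to the root (a self-cycle);
// pass 2 rewrites every link on that chain to point straight at the root.
static unsigned find_compress_sketch(std::vector<unsigned>& uf, unsigned lrg) {
  unsigned cur = lrg;
  unsigned next = uf[cur];
  while (next != cur) {       // scan chain of equivalences
    assert(next < cur);       // always union smaller
    cur = next;
    next = uf[cur];
  }
  while (lrg != next) {       // path compression
    unsigned tmp = uf[lrg];
    uf[lrg] = next;
    lrg = tmp;
  }
  return next;
}

// Example: with uf = {0,1,1,2,3}, find_compress_sketch(uf, 4) returns 1
// and leaves uf = {0,1,1,1,1}, so subsequent finds are one hop.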
146 146
147 147 // Reset the Union-Find map to identity
148 148 void LiveRangeMap::reset_uf_map(uint max_lrg_id) {
149 149 _max_lrg_id= max_lrg_id;
150 150 // Force the Union-Find mapping to be at least this large
151 151 _uf_map.at_put_grow(_max_lrg_id, 0);
152 152 // Initialize it to be the ID mapping.
153 153 for (uint i = 0; i < _max_lrg_id; ++i) {
154 154 _uf_map.at_put(i, i);
155 155 }
156 156 }
157 157
158 158 // Make all Nodes map directly to their final live range; no need for
159 159 // the Union-Find mapping after this call.
160 160 void LiveRangeMap::compress_uf_map_for_nodes() {
161 161 // For all Nodes, compress mapping
162 162 uint unique = _names.length();
163 163 for (uint i = 0; i < unique; ++i) {
164 164 uint lrg = _names.at(i);
165 165 uint compressed_lrg = find(lrg);
166 166 if (lrg != compressed_lrg) {
167 167 _names.at_put(i, compressed_lrg);
168 168 }
169 169 }
170 170 }
171 171
172 172 // Like Find above, but without path compression, so bad asymptotic behavior
173 173 uint LiveRangeMap::find_const(uint lrg) const {
174 174 if (!lrg) {
175 175 return lrg; // Ignore the zero LRG
176 176 }
177 177
178 178 // Off the end? This happens during debugging dumps when you have
179 179 // brand new live ranges but have not told the allocator yet.
180 180 if (lrg >= _max_lrg_id) {
181 181 return lrg;
182 182 }
183 183
184 184 uint next = _uf_map.at(lrg);
185 185 while (next != lrg) { // Scan chain of equivalences
186 186 assert(next < lrg, "always union smaller");
187 187 lrg = next; // until find a fixed-point
188 188 next = _uf_map.at(lrg);
189 189 }
190 190 return next;
191 191 }
192 192
193 193 PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher)
194 194 : PhaseRegAlloc(unique, cfg, matcher,
195 195 #ifndef PRODUCT
196 196 print_chaitin_statistics
197 197 #else
198 198 NULL
199 199 #endif
200 200 )
201 201 , _lrg_map(Thread::current()->resource_area(), unique)
202 202 , _live(0)
203 203 , _spilled_once(Thread::current()->resource_area())
204 204 , _spilled_twice(Thread::current()->resource_area())
205 205 , _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0)
206 206 , _oldphi(unique)
207 207 #ifndef PRODUCT
208 208 , _trace_spilling(TraceSpilling || C->method_has_option("TraceSpilling"))
209 209 #endif
210 210 {
211 211 NOT_PRODUCT( Compile::TracePhase t3("ctorChaitin", &_t_ctorChaitin, TimeCompiler); )
212 212
213 213 _high_frequency_lrg = MIN2(float(OPTO_LRG_HIGH_FREQ), _cfg.get_outer_loop_frequency());
214 214
215 215 // Build a list of basic blocks, sorted by frequency
216 216 _blks = NEW_RESOURCE_ARRAY(Block *, _cfg.number_of_blocks());
217 217 // Experiment with sorting strategies to speed compilation
218 218 double cutoff = BLOCK_FREQUENCY(1.0); // Cutoff for high frequency bucket
219 219 Block **buckets[NUMBUCKS]; // Array of buckets
220 220 uint buckcnt[NUMBUCKS]; // Array of bucket counters
221 221 double buckval[NUMBUCKS]; // Array of bucket value cutoffs
222 222 for (uint i = 0; i < NUMBUCKS; i++) {
223 223 buckets[i] = NEW_RESOURCE_ARRAY(Block *, _cfg.number_of_blocks());
224 224 buckcnt[i] = 0;
225 225 // Bump by three orders of magnitude each time
226 226 cutoff *= 0.001;
227 227 buckval[i] = cutoff;
228 228 for (uint j = 0; j < _cfg.number_of_blocks(); j++) {
229 229 buckets[i][j] = NULL;
230 230 }
231 231 }
232 232 // Sort blocks into buckets
233 233 for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
234 234 for (uint j = 0; j < NUMBUCKS; j++) {
235 235 if ((j == NUMBUCKS - 1) || (_cfg.get_block(i)->_freq > buckval[j])) {
236 236 // Assign block to end of list for appropriate bucket
237 237 buckets[j][buckcnt[j]++] = _cfg.get_block(i);
238 238 break; // kick out of inner loop
239 239 }
240 240 }
241 241 }
242 242 // Dump buckets into final block array
243 243 uint blkcnt = 0;
244 244 for (uint i = 0; i < NUMBUCKS; i++) {
245 245 for (uint j = 0; j < buckcnt[i]; j++) {
246 246 _blks[blkcnt++] = buckets[i][j];
247 247 }
248 248 }
249 249
250 250 assert(blkcnt == _cfg.number_of_blocks(), "Block array not totally filled");
251 251 }
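
The bucket pass above is effectively a small radix sort on block frequency; a hypothetical standalone rendering (plain doubles standing in for Block*, and 1.0 assumed as the BLOCK_FREQUENCY(1.0) cutoff) makes the cutoff scheme explicit:

#include <vector>

// Each bucket covers three orders of magnitude of frequency; the last
// bucket catches everything else. Concatenating the buckets yields a
// list with the hottest blocks first (order within a bucket is untouched).
static std::vector<double> freq_sort_sketch(const std::vector<double>& freqs) {
  const int NUMBUCKS = 3;
  double cutoff = 1.0;                       // assumed BLOCK_FREQUENCY(1.0)
  std::vector<double> buckets[NUMBUCKS];
  double buckval[NUMBUCKS];
  for (int i = 0; i < NUMBUCKS; i++) {
    cutoff *= 0.001;                         // bump down three orders each time
    buckval[i] = cutoff;
  }
  for (double f : freqs) {
    for (int j = 0; j < NUMBUCKS; j++) {
      if (j == NUMBUCKS - 1 || f > buckval[j]) {
        buckets[j].push_back(f);
        break;                               // kick out of inner loop
      }
    }
  }
  std::vector<double> out;
  for (int i = 0; i < NUMBUCKS; i++)
    out.insert(out.end(), buckets[i].begin(), buckets[i].end());
  return out;
}

// freq_sort_sketch({1e-8, 5.0, 1e-5}) => {5.0, 1e-5, 1e-8}:
// 5.0 > 1e-3 lands in bucket 0, 1e-5 > 1e-6 in bucket 1, 1e-8 in the catch-all.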
252 252
253 253 // Union two sets together.
254 254 void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) {
255 255 uint src = _lrg_map.find(src_n);
256 256 uint dst = _lrg_map.find(dst_n);
257 257 assert(src, "");
258 258 assert(dst, "");
259 259 assert(src < _lrg_map.max_lrg_id(), "oob");
260 260 assert(dst < _lrg_map.max_lrg_id(), "oob");
261 261 assert(src < dst, "always union smaller");
262 262 _lrg_map.uf_map(dst, src);
263 263 }
264 264
265 265 void PhaseChaitin::new_lrg(const Node *x, uint lrg) {
266 266 // Make the Node->LRG mapping
267 267 _lrg_map.extend(x->_idx,lrg);
268 268 // Make the Union-Find mapping an identity function
269 269 _lrg_map.uf_extend(lrg, lrg);
270 270 }
271 271
272 272
273 273 int PhaseChaitin::clone_projs(Block* b, uint idx, Node* orig, Node* copy, uint& max_lrg_id) {
274 274 assert(b->find_node(copy) == (idx - 1), "incorrect insert index for copy kill projections");
275 275 DEBUG_ONLY( Block* borig = _cfg.get_block_for_node(orig); )
276 276 int found_projs = 0;
277 277 uint cnt = orig->outcnt();
278 278 for (uint i = 0; i < cnt; i++) {
279 279 Node* proj = orig->raw_out(i);
280 280 if (proj->is_MachProj()) {
281 281 assert(proj->outcnt() == 0, "only kill projections are expected here");
282 282 assert(_cfg.get_block_for_node(proj) == borig, "incorrect block for kill projections");
283 283 found_projs++;
284 284 // Copy kill projections after the cloned node
285 285 Node* kills = proj->clone();
286 286 kills->set_req(0, copy);
287 287 b->insert_node(kills, idx++);
288 288 _cfg.map_node_to_block(kills, b);
289 289 new_lrg(kills, max_lrg_id++);
290 290 }
291 291 }
292 292 return found_projs;
293 293 }
294 294
295 295 // Renumber the live ranges to compact them. Makes the IFG smaller.
296 296 void PhaseChaitin::compact() {
297 297 // Currently the _uf_map contains a series of short chains which are headed
298 298 // by a self-cycle. All the chains run from big numbers to little numbers.
299 299 // The Find() call chases the chains & shortens them for the next Find call.
300 300 // We are going to change this structure slightly. Numbers above a moving
301 301 // wave 'i' are unchanged. Numbers below 'j' point directly to their
302 302 // compacted live range with no further chaining. There are no chains or
303 303 // cycles below 'i', so the Find call no longer works.
304 304 uint j=1;
305 305 uint i;
306 306 for (i = 1; i < _lrg_map.max_lrg_id(); i++) {
307 307 uint lr = _lrg_map.uf_live_range_id(i);
308 308 // Ignore unallocated live ranges
309 309 if (!lr) {
310 310 continue;
311 311 }
312 312 assert(lr <= i, "");
313 313 _lrg_map.uf_map(i, ( lr == i ) ? j++ : _lrg_map.uf_live_range_id(lr));
314 314 }
315 315 // Now change the Node->LR mapping to reflect the compacted names
316 316 uint unique = _lrg_map.size();
317 317 for (i = 0; i < unique; i++) {
318 318 uint lrg_id = _lrg_map.live_range_id(i);
319 319 _lrg_map.map(i, _lrg_map.uf_live_range_id(lrg_id));
320 320 }
321 321
322 322 // Reset the Union-Find mapping
323 323 _lrg_map.reset_uf_map(j);
324 324 }
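
A worked example of the renumbering above may help (a sketch over a plain vector; it relies on the invariant lr <= i, so a head's new dense id is already in place when its chained entries are visited):

#include <vector>

// Heads (uf[i] == i) get the next dense id; chained entries copy their
// head's already-compacted id; id 0 (unallocated) is skipped.
static unsigned compact_sketch(std::vector<unsigned>& uf) {
  unsigned j = 1;
  for (unsigned i = 1; i < uf.size(); i++) {
    unsigned lr = uf[i];
    if (!lr) continue;                  // ignore unallocated live ranges
    uf[i] = (lr == i) ? j++ : uf[lr];
  }
  return j;                             // new, smaller max_lrg_id
}

// With uf = {0, 0, 2, 2, 0, 2} (ids 1 and 4 unallocated, 3 and 5 unioned
// into 2), compact_sketch gives uf = {0, 0, 1, 1, 0, 1} and returns 2.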
325 325
326 326 void PhaseChaitin::Register_Allocate() {
327 327
328 328 // Above the OLD FP (and in registers) are the incoming arguments. Stack
329 329 // slots in this area are called "arg_slots". Above the NEW FP (and in
330 330 // registers) is the outgoing argument area; above that is the spill/temp
331 331 // area. These are all "frame_slots". Arg_slots start at the zero
332 332 // stack_slots and count up to the known arg_size. Frame_slots start at
333 333 // the stack_slot #arg_size and go up. After allocation I map stack
334 334 // slots to actual offsets. Stack-slots in the arg_slot area are biased
335 335 // by the frame_size; stack-slots in the frame_slot area are biased by 0.
336 336
337 337 _trip_cnt = 0;
338 338 _alternate = 0;
339 339 _matcher._allocation_started = true;
340 340
341 341 ResourceArea split_arena; // Arena for Split local resources
342 342 ResourceArea live_arena; // Arena for liveness & IFG info
343 343 ResourceMark rm(&live_arena);
344 344
345 345 // Need live-ness for the IFG; need the IFG for coalescing. If the
346 346 // liveness is JUST for coalescing, then I can get some mileage by renaming
347 347 // all copy-related live ranges low and then using the max copy-related
348 348 // live range as a cut-off for LIVE and the IFG. In other words, I can
349 349 // build a subset of LIVE and IFG just for copies.
350 350 PhaseLive live(_cfg, _lrg_map.names(), &live_arena);
351 351
352 352 // Need IFG for coalescing and coloring
353 353 PhaseIFG ifg(&live_arena);
354 354 _ifg = &ifg;
355 355
356 356 // Come out of SSA world to the Named world. Assign (virtual) registers to
357 357 // Nodes. Use the same register for all inputs and the output of PhiNodes
358 358 // - effectively ending SSA form. This requires either coalescing live
359 359 // ranges or inserting copies. For the moment, we insert "virtual copies"
360 360 // - we pretend there is a copy prior to each Phi in predecessor blocks.
361 361 // We will attempt to coalesce such "virtual copies" before we manifest
362 362 // them for real.
363 363 de_ssa();
364 364
365 365 #ifdef ASSERT
366 366 // Verify the graph before RA.
367 367 verify(&live_arena);
368 368 #endif
369 369
370 370 {
371 371 NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
372 372 _live = NULL; // Mark live as being not available
373 373 rm.reset_to_mark(); // Reclaim working storage
374 374 IndexSet::reset_memory(C, &live_arena);
375 375 ifg.init(_lrg_map.max_lrg_id()); // Empty IFG
376 376 gather_lrg_masks( false ); // Collect LRG masks
377 377 live.compute(_lrg_map.max_lrg_id()); // Compute liveness
378 378 _live = &live; // Mark LIVE as being available
379 379 }
380 380
381 381 // Base pointers are currently "used" by instructions which define new
382 382 // derived pointers. This makes base pointers live up to where the
383 383 // derived pointer is made, but not beyond. Really, they need to be live
384 384 // across any GC point where the derived value is live. So this code looks
385 385 // at all the GC points, and "stretches" the live range of any base pointer
386 386 // to the GC point.
387 387 if (stretch_base_pointer_live_ranges(&live_arena)) {
388 388 NOT_PRODUCT(Compile::TracePhase t3("computeLive (sbplr)", &_t_computeLive, TimeCompiler);)
389 389 // Since some live range stretched, I need to recompute live
390 390 _live = NULL;
391 391 rm.reset_to_mark(); // Reclaim working storage
392 392 IndexSet::reset_memory(C, &live_arena);
393 393 ifg.init(_lrg_map.max_lrg_id());
394 394 gather_lrg_masks(false);
395 395 live.compute(_lrg_map.max_lrg_id());
396 396 _live = &live;
397 397 }
398 398 // Create the interference graph using virtual copies
399 399 build_ifg_virtual(); // Include stack slots this time
400 400
401 401 // Aggressive (but pessimistic) copy coalescing.
402 402 // This pass works on virtual copies. Any virtual copies which are not
403 403 // coalesced get manifested as actual copies
404 404 {
405 405 // The IFG is/was triangular. I am 'squaring it up' so Union can run
406 406 // faster. Union requires a 'for all' operation which is slow on the
407 407 // triangular adjacency matrix (quick reminder: the IFG is 'sparse' -
408 408 // meaning I can visit all the Nodes neighbors less than a Node in time
409 409 // O(# of neighbors), but I have to visit all the Nodes greater than a
410 410 // given Node and search them for an instance, i.e., time O(#MaxLRG)).
411 411 _ifg->SquareUp();
412 412
413 413 PhaseAggressiveCoalesce coalesce(*this);
414 414 coalesce.coalesce_driver();
415 415 // Insert un-coalesced copies. Visit all Phis. Where inputs to a Phi do
416 416 // not match the Phi itself, insert a copy.
417 417 coalesce.insert_copies(_matcher);
418 418 if (C->failing()) {
419 419 return;
420 420 }
421 421 }
422 422
423 423 // After aggressive coalesce, attempt a first cut at coloring.
424 424 // To color, we need the IFG and for that we need LIVE.
425 425 {
426 426 NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
427 427 _live = NULL;
428 428 rm.reset_to_mark(); // Reclaim working storage
429 429 IndexSet::reset_memory(C, &live_arena);
430 430 ifg.init(_lrg_map.max_lrg_id());
431 431 gather_lrg_masks( true );
432 432 live.compute(_lrg_map.max_lrg_id());
433 433 _live = &live;
434 434 }
435 435
436 436 // Build physical interference graph
437 437 uint must_spill = 0;
438 438 must_spill = build_ifg_physical(&live_arena);
439 439 // If we have a guaranteed spill, might as well spill now
440 440 if (must_spill) {
441 441 if(!_lrg_map.max_lrg_id()) {
442 442 return;
443 443 }
444 444 // Bail out if unique gets too large (ie - unique > MaxNodeLimit)
445 445 C->check_node_count(10*must_spill, "out of nodes before split");
446 446 if (C->failing()) {
447 447 return;
448 448 }
449 449
450 450 uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena); // Split spilling LRG everywhere
451 451 _lrg_map.set_max_lrg_id(new_max_lrg_id);
452 452 // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
453 453 // or we failed to split
454 454 C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after physical split");
455 455 if (C->failing()) {
456 456 return;
457 457 }
458 458
459 459 NOT_PRODUCT(C->verify_graph_edges();)
460 460
461 461 compact(); // Compact LRGs; return new lower max lrg
462 462
463 463 {
464 464 NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
465 465 _live = NULL;
466 466 rm.reset_to_mark(); // Reclaim working storage
467 467 IndexSet::reset_memory(C, &live_arena);
468 468 ifg.init(_lrg_map.max_lrg_id()); // Build a new interference graph
469 469 gather_lrg_masks( true ); // Collect intersect mask
470 470 live.compute(_lrg_map.max_lrg_id()); // Compute LIVE
471 471 _live = &live;
472 472 }
473 473 build_ifg_physical(&live_arena);
474 474 _ifg->SquareUp();
475 475 _ifg->Compute_Effective_Degree();
476 476 // Only do conservative coalescing if requested
477 477 if (OptoCoalesce) {
478 478 // Conservative (and pessimistic) copy coalescing of those spills
479 479 PhaseConservativeCoalesce coalesce(*this);
480 480 // If max live ranges greater than cutoff, don't color the stack.
481 481 // This cutoff can be larger than below since it is only done once.
482 482 coalesce.coalesce_driver();
483 483 }
484 484 _lrg_map.compress_uf_map_for_nodes();
485 485
486 486 #ifdef ASSERT
487 487 verify(&live_arena, true);
488 488 #endif
489 489 } else {
490 490 ifg.SquareUp();
491 491 ifg.Compute_Effective_Degree();
492 492 #ifdef ASSERT
493 493 set_was_low();
494 494 #endif
495 495 }
496 496
497 497 // Prepare for Simplify & Select
498 498 cache_lrg_info(); // Count degree of LRGs
499 499
500 500 // Simplify the InterFerence Graph by removing LRGs of low degree.
501 501 // LRGs of low degree are trivially colorable.
502 502 Simplify();
503 503
504 504 // Select colors by re-inserting LRGs back into the IFG in reverse order.
505 505 // Return whether or not something spills.
506 506 uint spills = Select( );
507 507
508 508 // If we spill, split and recycle the entire thing
509 509 while( spills ) {
510 510 if( _trip_cnt++ > 24 ) {
511 511 DEBUG_ONLY( dump_for_spill_split_recycle(); )
512 512 if( _trip_cnt > 27 ) {
513 513 C->record_method_not_compilable("failed spill-split-recycle sanity check");
514 514 return;
515 515 }
516 516 }
517 517
518 518 if (!_lrg_map.max_lrg_id()) {
519 519 return;
520 520 }
521 521 uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena); // Split spilling LRG everywhere
522 522 _lrg_map.set_max_lrg_id(new_max_lrg_id);
523 523 // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
524 524 C->check_node_count(2 * NodeLimitFudgeFactor, "out of nodes after split");
525 525 if (C->failing()) {
526 526 return;
527 527 }
528 528
529 529 compact(); // Compact LRGs; return new lower max lrg
530 530
531 531 // Nuke the live-ness and interference graph and LiveRanGe info
532 532 {
533 533 NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
534 534 _live = NULL;
535 535 rm.reset_to_mark(); // Reclaim working storage
536 536 IndexSet::reset_memory(C, &live_arena);
537 537 ifg.init(_lrg_map.max_lrg_id());
538 538
539 539 // Create LiveRanGe array.
540 540 // Intersect register masks for all USEs and DEFs
541 541 gather_lrg_masks(true);
542 542 live.compute(_lrg_map.max_lrg_id());
543 543 _live = &live;
544 544 }
545 545 must_spill = build_ifg_physical(&live_arena);
546 546 _ifg->SquareUp();
547 547 _ifg->Compute_Effective_Degree();
548 548
549 549 // Only do conservative coalescing if requested
550 550 if (OptoCoalesce) {
551 551 // Conservative (and pessimistic) copy coalescing
552 552 PhaseConservativeCoalesce coalesce(*this);
553 553 // Checking for few live ranges determines how aggressive the coalescing is.
554 554 coalesce.coalesce_driver();
555 555 }
556 556 _lrg_map.compress_uf_map_for_nodes();
557 557 #ifdef ASSERT
558 558 verify(&live_arena, true);
559 559 #endif
560 560 cache_lrg_info(); // Count degree of LRGs
561 561
562 562 // Simplify the InterFerence Graph by removing LRGs of low degree.
563 563 // LRGs of low degree are trivially colorable.
564 564 Simplify();
565 565
566 566 // Select colors by re-inserting LRGs back into the IFG in reverse order.
567 567 // Return whether or not something spills.
(567 lines elided)
568 568 spills = Select();
569 569 }
570 570
571 571 // Count number of Simplify-Select trips per coloring success.
572 572 _allocator_attempts += _trip_cnt + 1;
573 573 _allocator_successes += 1;
574 574
575 575 // Peephole remove copies
576 576 post_allocate_copy_removal();
577 577
578 + // Merge multidefs if multiple defs representing the same value are used in a single block.
579 + merge_multidefs();
580 +
578 581 #ifdef ASSERT
579 582 // Verify the graph after RA.
580 583 verify(&live_arena);
581 584 #endif
582 585
583 586 // max_reg is past the largest *register* used.
584 587 // Convert that to a frame_slot number.
585 588 if (_max_reg <= _matcher._new_SP) {
586 589 _framesize = C->out_preserve_stack_slots();
587 590 }
588 591 else {
589 592 _framesize = _max_reg -_matcher._new_SP;
590 593 }
591 594 assert((int)(_matcher._new_SP+_framesize) >= (int)_matcher._out_arg_limit, "framesize must be large enough");
592 595
593 596 // This frame must preserve the required fp alignment
594 597 _framesize = round_to(_framesize, Matcher::stack_alignment_in_slots());
595 598 assert( _framesize >= 0 && _framesize <= 1000000, "sanity check" );
596 599 #ifndef PRODUCT
597 600 _total_framesize += _framesize;
598 601 if ((int)_framesize > _max_framesize) {
599 602 _max_framesize = _framesize;
600 603 }
601 604 #endif
602 605
603 606 // Convert CISC spills
604 607 fixup_spills();
605 608
606 609 // Log regalloc results
607 610 CompileLog* log = Compile::current()->log();
608 611 if (log != NULL) {
609 612 log->elem("regalloc attempts='%d' success='%d'", _trip_cnt, !C->failing());
610 613 }
611 614
612 615 if (C->failing()) {
613 616 return;
614 617 }
615 618
616 619 NOT_PRODUCT(C->verify_graph_edges();)
617 620
618 621 // Move important info out of the live_arena to longer lasting storage.
619 622 alloc_node_regs(_lrg_map.size());
620 623 for (uint i=0; i < _lrg_map.size(); i++) {
621 624 if (_lrg_map.live_range_id(i)) { // Live range associated with Node?
622 625 LRG &lrg = lrgs(_lrg_map.live_range_id(i));
623 626 if (!lrg.alive()) {
624 627 set_bad(i);
625 628 } else if (lrg.num_regs() == 1) {
626 629 set1(i, lrg.reg());
627 630 } else { // Must be a register-set
628 631 if (!lrg._fat_proj) { // Must be aligned adjacent register set
629 632 // Live ranges record the highest register in their mask.
630 633 // We want the low register for the AD file writer's convenience.
631 634 OptoReg::Name hi = lrg.reg(); // Get hi register
632 635 OptoReg::Name lo = OptoReg::add(hi, (1-lrg.num_regs())); // Find lo
633 636 // We have to use pair [lo,lo+1] even for wide vectors because
634 637 // the rest of code generation works only with pairs. It is safe
635 638 // since for registers encoding only 'lo' is used.
636 639 // Second reg from pair is used in ScheduleAndBundle on SPARC where
637 640 // vector max size is 8 which corresponds to registers pair.
638 641 // It is also used in BuildOopMaps but oop operations are not
639 642 // vectorized.
640 643 set2(i, lo);
641 644 } else { // Misaligned; extract 2 bits
642 645 OptoReg::Name hi = lrg.reg(); // Get hi register
643 646 lrg.Remove(hi); // Yank from mask
644 647 int lo = lrg.mask().find_first_elem(); // Find lo
645 648 set_pair(i, hi, lo);
646 649 }
647 650 }
648 651 if( lrg._is_oop ) _node_oops.set(i);
649 652 } else {
650 653 set_bad(i);
651 654 }
652 655 }
653 656
654 657 // Done!
655 658 _live = NULL;
656 659 _ifg = NULL;
657 660 C->set_indexSet_arena(NULL); // ResourceArea is at end of scope
658 661 }
659 662
660 663 void PhaseChaitin::de_ssa() {
661 664 // Set initial Names for all Nodes. Most Nodes get the virtual register
662 665 // number. A few get the ZERO live range number. These do not
663 666 // get allocated, but instead rely on correct scheduling to ensure that
664 667 // only one instance is simultaneously live at a time.
665 668 uint lr_counter = 1;
666 669 for( uint i = 0; i < _cfg.number_of_blocks(); i++ ) {
667 670 Block* block = _cfg.get_block(i);
668 671 uint cnt = block->number_of_nodes();
669 672
670 673 // Handle all the normal Nodes in the block
671 674 for( uint j = 0; j < cnt; j++ ) {
672 675 Node *n = block->get_node(j);
673 676 // Pre-color to the zero live range, or pick virtual register
674 677 const RegMask &rm = n->out_RegMask();
675 678 _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0);
676 679 }
677 680 }
678 681
679 682 // Reset the Union-Find mapping to be identity
680 683 _lrg_map.reset_uf_map(lr_counter);
681 684 }
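
The numbering rule above is simply one fresh virtual register per value-producing node; a hypothetical sketch over a flat list of nodes:

#include <vector>

// Nodes with a non-empty output register mask get a fresh live-range id;
// everything else maps to the reserved ZERO live range.
static std::vector<unsigned> de_ssa_sketch(const std::vector<bool>& produces_value) {
  unsigned lr_counter = 1;              // live range 0 is "not allocated"
  std::vector<unsigned> names(produces_value.size());
  for (size_t i = 0; i < produces_value.size(); i++) {
    names[i] = produces_value[i] ? lr_counter++ : 0;
  }
  return names;
}

// de_ssa_sketch({true, false, true}) => {1, 0, 2}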
682 685
683 686
684 687 // Gather LiveRanGe information, including register masks. Modification of
685 688 // cisc spillable in_RegMasks should not be done before AggressiveCoalesce.
686 689 void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
687 690
688 691 // Nail down the frame pointer live range
689 692 uint fp_lrg = _lrg_map.live_range_id(_cfg.get_root_node()->in(1)->in(TypeFunc::FramePtr));
690 693 lrgs(fp_lrg)._cost += 1e12; // Cost is infinite
691 694
692 695 // For all blocks
693 696 for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
694 697 Block* block = _cfg.get_block(i);
695 698
696 699 // For all instructions
697 700 for (uint j = 1; j < block->number_of_nodes(); j++) {
698 701 Node* n = block->get_node(j);
699 702 uint input_edge_start =1; // Skip the control edge for most nodes
700 703 if (n->is_Mach()) {
701 704 input_edge_start = n->as_Mach()->oper_input_base();
702 705 }
703 706 uint idx = n->is_Copy();
704 707
705 708 // Get virtual register number, same as LiveRanGe index
706 709 uint vreg = _lrg_map.live_range_id(n);
707 710 LRG& lrg = lrgs(vreg);
708 711 if (vreg) { // No vreg means un-allocable (e.g. memory)
709 712
710 713 // Collect has-copy bit
711 714 if (idx) {
712 715 lrg._has_copy = 1;
713 716 uint clidx = _lrg_map.live_range_id(n->in(idx));
714 717 LRG& copy_src = lrgs(clidx);
715 718 copy_src._has_copy = 1;
716 719 }
717 720
718 721 // Check for float-vs-int live range (used in register-pressure
719 722 // calculations)
720 723 const Type *n_type = n->bottom_type();
721 724 if (n_type->is_floatingpoint()) {
722 725 lrg._is_float = 1;
723 726 }
724 727
725 728 // Check for twice prior spilling. Once prior spilling might have
726 729 // spilled 'soft', 2nd prior spill should have spilled 'hard' and
727 730 // further spilling is unlikely to make progress.
728 731 if (_spilled_once.test(n->_idx)) {
729 732 lrg._was_spilled1 = 1;
730 733 if (_spilled_twice.test(n->_idx)) {
731 734 lrg._was_spilled2 = 1;
732 735 }
733 736 }
734 737
735 738 #ifndef PRODUCT
736 739 if (trace_spilling() && lrg._def != NULL) {
737 740 // collect defs for MultiDef printing
738 741 if (lrg._defs == NULL) {
739 742 lrg._defs = new (_ifg->_arena) GrowableArray<Node*>(_ifg->_arena, 2, 0, NULL);
740 743 lrg._defs->append(lrg._def);
741 744 }
742 745 lrg._defs->append(n);
743 746 }
744 747 #endif
745 748
746 749 // Check for a single def LRG; these can spill nicely
747 750 // via rematerialization. Flag as NULL for no def found
748 751 // yet, 'n' for a single def, or NodeSentinel (-1) for many defs.
749 752 lrg._def = lrg._def ? NodeSentinel : n;
750 753
751 754 // Limit result register mask to acceptable registers
752 755 const RegMask &rm = n->out_RegMask();
753 756 lrg.AND( rm );
754 757
755 758 int ireg = n->ideal_reg();
756 759 assert( !n->bottom_type()->isa_oop_ptr() || ireg == Op_RegP,
757 760 "oops must be in Op_RegP's" );
758 761
759 762 // Check for vector live range (only if vector register is used).
760 763 // On SPARC a vector uses RegD, which could be misaligned, so it is not
761 764 // processed as a vector in RA.
762 765 if (RegMask::is_vector(ireg))
763 766 lrg._is_vector = 1;
764 767 assert(n_type->isa_vect() == NULL || lrg._is_vector || ireg == Op_RegD || ireg == Op_RegL,
765 768 "vector must be in vector registers");
766 769
767 770 // Check for bound register masks
768 771 const RegMask &lrgmask = lrg.mask();
769 772 if (lrgmask.is_bound(ireg)) {
770 773 lrg._is_bound = 1;
771 774 }
772 775
773 776 // Check for maximum frequency value
774 777 if (lrg._maxfreq < block->_freq) {
775 778 lrg._maxfreq = block->_freq;
776 779 }
777 780
778 781 // Check for oop-iness, or long/double
779 782 // Check for multi-kill projection
780 783 switch (ireg) {
781 784 case MachProjNode::fat_proj:
782 785 // Fat projections have size equal to number of registers killed
783 786 lrg.set_num_regs(rm.Size());
784 787 lrg.set_reg_pressure(lrg.num_regs());
785 788 lrg._fat_proj = 1;
786 789 lrg._is_bound = 1;
787 790 break;
788 791 case Op_RegP:
789 792 #ifdef _LP64
790 793 lrg.set_num_regs(2); // Size is 2 stack words
791 794 #else
792 795 lrg.set_num_regs(1); // Size is 1 stack word
793 796 #endif
794 797 // Register pressure is tracked relative to the maximum values
795 798 // suggested for that platform, INTPRESSURE and FLOATPRESSURE,
796 799 // and relative to other types which compete for the same regs.
797 800 //
798 801 // The following table contains suggested values based on the
799 802 // architectures as defined in each .ad file.
800 803 // INTPRESSURE and FLOATPRESSURE may be tuned differently for
801 804 // compile-speed or performance.
802 805 // Note1:
803 806 // SPARC and SPARCV9 reg_pressures are at 2 instead of 1
804 807 // since .ad registers are defined as high and low halves.
805 808 // These reg_pressure values remain compatible with the code
806 809 // in is_high_pressure() which relates get_invalid_mask_size(),
807 810 // Block::_reg_pressure and INTPRESSURE, FLOATPRESSURE.
808 811 // Note2:
809 812 // SPARC -d32 has 24 registers available for integral values,
810 813 // but only 10 of these are safe for 64-bit longs.
811 814 // Using set_reg_pressure(2) for both int and long means
812 815 // the allocator will believe it can fit 26 longs into
813 816 // registers. Using 2 for longs and 1 for ints means the
814 817 // allocator will attempt to put 52 integers into registers.
815 818 // The settings below limit this problem to methods with
816 819 // many long values which are being run on 32-bit SPARC.
817 820 //
818 821 // ------------------- reg_pressure --------------------
819 822 // Each entry is reg_pressure_per_value,number_of_regs
820 823 // RegL RegI RegFlags RegF RegD INTPRESSURE FLOATPRESSURE
821 824 // IA32 2 1 1 1 1 6 6
822 825 // IA64 1 1 1 1 1 50 41
823 826 // SPARC 2 2 2 2 2 48 (24) 52 (26)
824 827 // SPARCV9 2 2 2 2 2 48 (24) 52 (26)
825 828 // AMD64 1 1 1 1 1 14 15
826 829 // -----------------------------------------------------
827 830 #if defined(SPARC)
828 831 lrg.set_reg_pressure(2); // use for v9 as well
829 832 #else
830 833 lrg.set_reg_pressure(1); // normally one value per register
831 834 #endif
832 835 if( n_type->isa_oop_ptr() ) {
833 836 lrg._is_oop = 1;
834 837 }
835 838 break;
836 839 case Op_RegL: // Check for long or double
837 840 case Op_RegD:
838 841 lrg.set_num_regs(2);
839 842 // Define platform specific register pressure
840 843 #if defined(SPARC) || defined(ARM)
841 844 lrg.set_reg_pressure(2);
842 845 #elif defined(IA32)
843 846 if( ireg == Op_RegL ) {
844 847 lrg.set_reg_pressure(2);
845 848 } else {
846 849 lrg.set_reg_pressure(1);
847 850 }
848 851 #else
849 852 lrg.set_reg_pressure(1); // normally one value per register
850 853 #endif
851 854 // If this def of a double forces a mis-aligned double,
852 855 // flag as '_fat_proj' - really flag as allowing misalignment
853 856 // AND changes how we count interferences. A mis-aligned
854 857 // double can interfere with TWO aligned pairs, or effectively
855 858 // FOUR registers!
856 859 if (rm.is_misaligned_pair()) {
857 860 lrg._fat_proj = 1;
858 861 lrg._is_bound = 1;
859 862 }
860 863 break;
861 864 case Op_RegF:
862 865 case Op_RegI:
863 866 case Op_RegN:
864 867 case Op_RegFlags:
865 868 case 0: // not an ideal register
866 869 lrg.set_num_regs(1);
867 870 #ifdef SPARC
868 871 lrg.set_reg_pressure(2);
869 872 #else
870 873 lrg.set_reg_pressure(1);
871 874 #endif
872 875 break;
873 876 case Op_VecS:
874 877 assert(Matcher::vector_size_supported(T_BYTE,4), "sanity");
875 878 assert(RegMask::num_registers(Op_VecS) == RegMask::SlotsPerVecS, "sanity");
876 879 lrg.set_num_regs(RegMask::SlotsPerVecS);
877 880 lrg.set_reg_pressure(1);
878 881 break;
879 882 case Op_VecD:
880 883 assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecD), "sanity");
881 884 assert(RegMask::num_registers(Op_VecD) == RegMask::SlotsPerVecD, "sanity");
882 885 assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecD), "vector should be aligned");
883 886 lrg.set_num_regs(RegMask::SlotsPerVecD);
884 887 lrg.set_reg_pressure(1);
885 888 break;
886 889 case Op_VecX:
887 890 assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecX), "sanity");
888 891 assert(RegMask::num_registers(Op_VecX) == RegMask::SlotsPerVecX, "sanity");
889 892 assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecX), "vector should be aligned");
890 893 lrg.set_num_regs(RegMask::SlotsPerVecX);
891 894 lrg.set_reg_pressure(1);
892 895 break;
893 896 case Op_VecY:
894 897 assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecY), "sanity");
895 898 assert(RegMask::num_registers(Op_VecY) == RegMask::SlotsPerVecY, "sanity");
896 899 assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecY), "vector should be aligned");
897 900 lrg.set_num_regs(RegMask::SlotsPerVecY);
898 901 lrg.set_reg_pressure(1);
899 902 break;
900 903 default:
901 904 ShouldNotReachHere();
902 905 }
903 906 }
904 907
905 908 // Now do the same for inputs
906 909 uint cnt = n->req();
907 910 // Setup for CISC SPILLING
908 911 uint inp = (uint)AdlcVMDeps::Not_cisc_spillable;
909 912 if( UseCISCSpill && after_aggressive ) {
910 913 inp = n->cisc_operand();
911 914 if( inp != (uint)AdlcVMDeps::Not_cisc_spillable )
912 915 // Convert operand number to edge index number
913 916 inp = n->as_Mach()->operand_index(inp);
914 917 }
915 918 // Prepare register mask for each input
916 919 for( uint k = input_edge_start; k < cnt; k++ ) {
917 920 uint vreg = _lrg_map.live_range_id(n->in(k));
918 921 if (!vreg) {
919 922 continue;
920 923 }
921 924
922 925 // If this instruction is CISC Spillable, add the flags
923 926 // bit to its appropriate input
924 927 if( UseCISCSpill && after_aggressive && inp == k ) {
925 928 #ifndef PRODUCT
926 929 if( TraceCISCSpill ) {
927 930 tty->print(" use_cisc_RegMask: ");
928 931 n->dump();
929 932 }
930 933 #endif
931 934 n->as_Mach()->use_cisc_RegMask();
932 935 }
933 936
934 937 LRG &lrg = lrgs(vreg);
935 938 // // Testing for floating point code shape
936 939 // Node *test = n->in(k);
937 940 // if( test->is_Mach() ) {
938 941 // MachNode *m = test->as_Mach();
939 942 // int op = m->ideal_Opcode();
940 943 // if (n->is_Call() && (op == Op_AddF || op == Op_MulF) ) {
941 944 // int zzz = 1;
942 945 // }
943 946 // }
944 947
945 948 // Limit result register mask to acceptable registers.
946 949 // Do not limit registers from uncommon uses before
947 950 // AggressiveCoalesce. This effectively pre-virtual-splits
948 951 // around uncommon uses of common defs.
949 952 const RegMask &rm = n->in_RegMask(k);
950 953 if (!after_aggressive && _cfg.get_block_for_node(n->in(k))->_freq > 1000 * block->_freq) {
951 954 // Since we are BEFORE aggressive coalesce, leave the register
952 955 // mask untrimmed by the call. This encourages more coalescing.
953 956 // Later, AFTER aggressive, this live range will have to spill
954 957 // but the spiller handles slow-path calls very nicely.
955 958 } else {
956 959 lrg.AND( rm );
957 960 }
958 961
959 962 // Check for bound register masks
960 963 const RegMask &lrgmask = lrg.mask();
961 964 int kreg = n->in(k)->ideal_reg();
962 965 bool is_vect = RegMask::is_vector(kreg);
963 966 assert(n->in(k)->bottom_type()->isa_vect() == NULL ||
964 967 is_vect || kreg == Op_RegD || kreg == Op_RegL,
965 968 "vector must be in vector registers");
966 969 if (lrgmask.is_bound(kreg))
967 970 lrg._is_bound = 1;
968 971
969 972 // If this use of a double forces a mis-aligned double,
970 973 // flag as '_fat_proj' - really flag as allowing misalignment
971 974 // AND changes how we count interferences. A mis-aligned
972 975 // double can interfere with TWO aligned pairs, or effectively
973 976 // FOUR registers!
974 977 #ifdef ASSERT
975 978 if (is_vect) {
976 979 assert(lrgmask.is_aligned_sets(lrg.num_regs()), "vector should be aligned");
977 980 assert(!lrg._fat_proj, "sanity");
978 981 assert(RegMask::num_registers(kreg) == lrg.num_regs(), "sanity");
979 982 }
980 983 #endif
981 984 if (!is_vect && lrg.num_regs() == 2 && !lrg._fat_proj && rm.is_misaligned_pair()) {
982 985 lrg._fat_proj = 1;
983 986 lrg._is_bound = 1;
984 987 }
985 988 // if the LRG is an unaligned pair, we will have to spill
986 989 // so clear the LRG's register mask if it is not already spilled
987 990 if (!is_vect && !n->is_SpillCopy() &&
988 991 (lrg._def == NULL || lrg.is_multidef() || !lrg._def->is_SpillCopy()) &&
989 992 lrgmask.is_misaligned_pair()) {
990 993 lrg.Clear();
991 994 }
992 995
993 996 // Check for maximum frequency value
994 997 if (lrg._maxfreq < block->_freq) {
995 998 lrg._maxfreq = block->_freq;
996 999 }
997 1000
998 1001 } // End for all allocated inputs
999 1002 } // end for all instructions
1000 1003 } // end for all blocks
1001 1004
1002 1005 // Final per-liverange setup
1003 1006 for (uint i2 = 0; i2 < _lrg_map.max_lrg_id(); i2++) {
1004 1007 LRG &lrg = lrgs(i2);
1005 1008 assert(!lrg._is_vector || !lrg._fat_proj, "sanity");
1006 1009 if (lrg.num_regs() > 1 && !lrg._fat_proj) {
1007 1010 lrg.clear_to_sets();
1008 1011 }
1009 1012 lrg.compute_set_mask_size();
1010 1013 if (lrg.not_free()) { // Handle case where we lose from the start
1011 1014 lrg.set_reg(OptoReg::Name(LRG::SPILL_REG));
1012 1015 lrg._direct_conflict = 1;
1013 1016 }
1014 1017 lrg.set_degree(0); // no neighbors in IFG yet
1015 1018 }
1016 1019 }
1017 1020
1018 1021 // Set the was-lo-degree bit. Conservative coalescing should not change the
1019 1022 // colorability of the graph. If any live range was of low-degree before
1020 1023 // coalescing, it should Simplify. This call sets the was-lo-degree bit.
1021 1024 // The bit is checked in Simplify.
1022 1025 void PhaseChaitin::set_was_low() {
1023 1026 #ifdef ASSERT
1024 1027 for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
1025 1028 int size = lrgs(i).num_regs();
1026 1029 uint old_was_lo = lrgs(i)._was_lo;
1027 1030 lrgs(i)._was_lo = 0;
1028 1031 if( lrgs(i).lo_degree() ) {
1029 1032 lrgs(i)._was_lo = 1; // Trivially of low degree
1030 1033 } else { // Else check the Briggs assertion
1031 1034 // Briggs' observation is that the lo-degree neighbors of a
1032 1035 // hi-degree live range will not interfere with the color choices
1033 1036 // of said hi-degree live range. The Simplify reverse-stack-coloring
1034 1037 // order takes care of the details. Hence you do not have to count
1035 1038 // low-degree neighbors when determining if this guy colors.
1036 1039 int briggs_degree = 0;
1037 1040 IndexSet *s = _ifg->neighbors(i);
1038 1041 IndexSetIterator elements(s);
1039 1042 uint lidx;
1040 1043 while((lidx = elements.next()) != 0) {
1041 1044 if( !lrgs(lidx).lo_degree() )
1042 1045 briggs_degree += MAX2(size,lrgs(lidx).num_regs());
1043 1046 }
1044 1047 if( briggs_degree < lrgs(i).degrees_of_freedom() )
1045 1048 lrgs(i)._was_lo = 1; // Low degree via the Briggs assertion
1046 1049 }
1047 1050 assert(old_was_lo <= lrgs(i)._was_lo, "_was_lo may not decrease");
1048 1051 }
1049 1052 #endif
1050 1053 }
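
Read in isolation, the Briggs test says: a live range needing size registers is guaranteed to color if even the worst case, where every hi-degree neighbor blocks as many of its candidate registers as possible, still leaves it a free choice. A hypothetical sketch of just that predicate:

// Only neighbors that are not themselves trivially colorable are counted,
// each weighted by the larger of the two set sizes (a big neighbor can
// block several of our candidate sets, and vice versa).
struct NbrSketch {
  bool lo_degree;   // neighbor is itself trivially colorable
  int  num_regs;    // registers the neighbor occupies
};

static bool briggs_colors(int size, int degrees_of_freedom,
                          const NbrSketch* nbrs, int n) {
  int briggs_degree = 0;
  for (int i = 0; i < n; i++) {
    if (!nbrs[i].lo_degree) {
      briggs_degree += (size > nbrs[i].num_regs) ? size : nbrs[i].num_regs;
    }
  }
  return briggs_degree < degrees_of_freedom;
}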
1051 1054
1052 1055 #define REGISTER_CONSTRAINED 16
1053 1056
1054 1057 // Compute cost/area ratio, in case we spill. Build the lo-degree list.
1055 1058 void PhaseChaitin::cache_lrg_info( ) {
1056 1059
1057 1060 for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
1058 1061 LRG &lrg = lrgs(i);
1059 1062
1060 1063 // Check for being of low degree: means we can be trivially colored.
1061 1064 // Low degree, dead or must-spill guys just get to simplify right away
1062 1065 if( lrg.lo_degree() ||
1063 1066 !lrg.alive() ||
1064 1067 lrg._must_spill ) {
1065 1068 // Split low degree list into those guys that must get a
1066 1069 // register and those that can go to register or stack.
1067 1070 // The idea is LRGs that can go register or stack color first when
1068 1071 // they have a good chance of getting a register. The register-only
1069 1072 // lo-degree live ranges always get a register.
1070 1073 OptoReg::Name hi_reg = lrg.mask().find_last_elem();
1071 1074 if( OptoReg::is_stack(hi_reg)) { // Can go to stack?
1072 1075 lrg._next = _lo_stk_degree;
1073 1076 _lo_stk_degree = i;
1074 1077 } else {
1075 1078 lrg._next = _lo_degree;
1076 1079 _lo_degree = i;
1077 1080 }
1078 1081 } else { // Else high degree
1079 1082 lrgs(_hi_degree)._prev = i;
1080 1083 lrg._next = _hi_degree;
1081 1084 lrg._prev = 0;
1082 1085 _hi_degree = i;
1083 1086 }
1084 1087 }
1085 1088 }
1086 1089
1087 1090 // Simplify the IFG by removing LRGs of low degree that have NO copies
1088 1091 void PhaseChaitin::Pre_Simplify( ) {
1089 1092
1090 1093 // Warm up the lo-degree no-copy list
1091 1094 int lo_no_copy = 0;
1092 1095 for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
1093 1096 if ((lrgs(i).lo_degree() && !lrgs(i)._has_copy) ||
1094 1097 !lrgs(i).alive() ||
1095 1098 lrgs(i)._must_spill) {
1096 1099 lrgs(i)._next = lo_no_copy;
1097 1100 lo_no_copy = i;
1098 1101 }
1099 1102 }
1100 1103
1101 1104 while( lo_no_copy ) {
1102 1105 uint lo = lo_no_copy;
1103 1106 lo_no_copy = lrgs(lo)._next;
1104 1107 int size = lrgs(lo).num_regs();
1105 1108
1106 1109 // Put the simplified guy on the simplified list.
1107 1110 lrgs(lo)._next = _simplified;
1108 1111 _simplified = lo;
1109 1112
1110 1113 // Yank this guy from the IFG.
1111 1114 IndexSet *adj = _ifg->remove_node( lo );
1112 1115
1113 1116 // If any neighbors' degrees fall below their number of
1114 1117 // allowed registers, then put that neighbor on the low degree
1115 1118 // list. Note that 'degree' can only fall and 'numregs' is
1116 1119 // unchanged by this action. Thus the two are equal at most once,
1117 1120 // so LRGs hit the lo-degree worklists at most once.
1118 1121 IndexSetIterator elements(adj);
1119 1122 uint neighbor;
1120 1123 while ((neighbor = elements.next()) != 0) {
1121 1124 LRG *n = &lrgs(neighbor);
1122 1125 assert( _ifg->effective_degree(neighbor) == n->degree(), "" );
1123 1126
1124 1127 // Check for just becoming of-low-degree
1125 1128 if( n->just_lo_degree() && !n->_has_copy ) {
1126 1129 assert(!(*_ifg->_yanked)[neighbor],"Cannot move to lo degree twice");
1127 1130 // Put on lo-degree list
1128 1131 n->_next = lo_no_copy;
1129 1132 lo_no_copy = neighbor;
1130 1133 }
1131 1134 }
1132 1135 } // End of while lo-degree no_copy worklist not empty
1133 1136
1134 1137 // No more lo-degree no-copy live ranges to simplify
1135 1138 }
1136 1139
1137 1140 // Simplify the IFG by removing LRGs of low degree.
1138 1141 void PhaseChaitin::Simplify( ) {
1139 1142
1140 1143 while( 1 ) { // Repeat till simplified it all
1141 1144 // May want to explore simplifying lo_degree before _lo_stk_degree.
1142 1145 // This might result in more spills coloring into registers during
1143 1146 // Select().
1144 1147 while( _lo_degree || _lo_stk_degree ) {
1145 1148 // If possible, pull from lo_stk first
1146 1149 uint lo;
1147 1150 if( _lo_degree ) {
1148 1151 lo = _lo_degree;
1149 1152 _lo_degree = lrgs(lo)._next;
1150 1153 } else {
1151 1154 lo = _lo_stk_degree;
1152 1155 _lo_stk_degree = lrgs(lo)._next;
1153 1156 }
1154 1157
1155 1158 // Put the simplified guy on the simplified list.
1156 1159 lrgs(lo)._next = _simplified;
1157 1160 _simplified = lo;
1158 1161 // If this guy is "at risk" then mark his current neighbors
1159 1162 if( lrgs(lo)._at_risk ) {
1160 1163 IndexSetIterator elements(_ifg->neighbors(lo));
1161 1164 uint datum;
1162 1165 while ((datum = elements.next()) != 0) {
1163 1166 lrgs(datum)._risk_bias = lo;
1164 1167 }
1165 1168 }
1166 1169
1167 1170 // Yank this guy from the IFG.
1168 1171 IndexSet *adj = _ifg->remove_node( lo );
1169 1172
1170 1173 // If any neighbors' degrees fall below their number of
1171 1174 // allowed registers, then put that neighbor on the low degree
1172 1175 // list. Note that 'degree' can only fall and 'numregs' is
1173 1176 // unchanged by this action. Thus the two are equal at most once,
1174 1177 // so LRGs hit the lo-degree worklist at most once.
1175 1178 IndexSetIterator elements(adj);
1176 1179 uint neighbor;
1177 1180 while ((neighbor = elements.next()) != 0) {
1178 1181 LRG *n = &lrgs(neighbor);
1179 1182 #ifdef ASSERT
1180 1183 if( VerifyOpto || VerifyRegisterAllocator ) {
1181 1184 assert( _ifg->effective_degree(neighbor) == n->degree(), "" );
1182 1185 }
1183 1186 #endif
1184 1187
1185 1188 // Check for just becoming of-low-degree just counting registers.
1186 1189 // _must_spill live ranges are already on the low degree list.
1187 1190 if( n->just_lo_degree() && !n->_must_spill ) {
1188 1191 assert(!(*_ifg->_yanked)[neighbor],"Cannot move to lo degree twice");
1189 1192 // Pull from hi-degree list
1190 1193 uint prev = n->_prev;
1191 1194 uint next = n->_next;
1192 1195 if( prev ) lrgs(prev)._next = next;
1193 1196 else _hi_degree = next;
1194 1197 lrgs(next)._prev = prev;
1195 1198 n->_next = _lo_degree;
1196 1199 _lo_degree = neighbor;
1197 1200 }
1198 1201 }
1199 1202 } // End of while lo-degree/lo_stk_degree worklist not empty
1200 1203
1201 1204 // Check for got everything: is hi-degree list empty?
1202 1205 if( !_hi_degree ) break;
1203 1206
1204 1207 // Time to pick a potential spill guy
1205 1208 uint lo_score = _hi_degree;
1206 1209 double score = lrgs(lo_score).score();
1207 1210 double area = lrgs(lo_score)._area;
1208 1211 double cost = lrgs(lo_score)._cost;
1209 1212 bool bound = lrgs(lo_score)._is_bound;
1210 1213
1211 1214 // Find cheapest guy
1212 1215 debug_only( int lo_no_simplify=0; );
1213 1216 for( uint i = _hi_degree; i; i = lrgs(i)._next ) {
1214 1217 assert( !(*_ifg->_yanked)[i], "" );
1215 1218 // It's just vaguely possible to move hi-degree to lo-degree without
1216 1219 // going through a just-lo-degree stage: If you remove a double from
1217 1220 // a float live range its degree will drop by 2 and you can skip the
1218 1221 // just-lo-degree stage. It's very rare (shows up after 5000+ methods
1219 1222 // in -Xcomp of Java2Demo). So just choose this guy to simplify next.
1220 1223 if( lrgs(i).lo_degree() ) {
1221 1224 lo_score = i;
1222 1225 break;
1223 1226 }
1224 1227 debug_only( if( lrgs(i)._was_lo ) lo_no_simplify=i; );
1225 1228 double iscore = lrgs(i).score();
1226 1229 double iarea = lrgs(i)._area;
1227 1230 double icost = lrgs(i)._cost;
1228 1231 bool ibound = lrgs(i)._is_bound;
1229 1232
1230 1233 // Compare cost/area of i vs cost/area of lo_score. Smaller cost/area
1231 1234 // wins. Ties happen because all live ranges in question have spilled
1232 1235 // a few times before and the spill-score adds a huge number which
1233 1236 // washes out the low order bits. We are choosing the lesser of 2
1234 1237 // evils; in this case pick largest area to spill.
1235 1238 // Ties also happen when live ranges are defined and used only inside
1236 1239 // one block, in which case their area is 0 and the score is set to max.
1237 1240 // In such case choose bound live range over unbound to free registers
1238 1241 // or with smaller cost to spill.
1239 1242 if( iscore < score ||
1240 1243 (iscore == score && iarea > area && lrgs(lo_score)._was_spilled2) ||
1241 1244 (iscore == score && iarea == area &&
1242 1245 ( (ibound && !bound) || ibound == bound && (icost < cost) )) ) {
1243 1246 lo_score = i;
1244 1247 score = iscore;
1245 1248 area = iarea;
1246 1249 cost = icost;
1247 1250 bound = ibound;
1248 1251 }
1249 1252 }
1250 1253 LRG *lo_lrg = &lrgs(lo_score);
1251 1254 // The live range we choose for spilling is either hi-degree, or very
1252 1255 // rarely it can be low-degree. If we choose a hi-degree live range
1253 1256 // there better not be any lo-degree choices.
1254 1257 assert( lo_lrg->lo_degree() || !lo_no_simplify, "Live range was lo-degree before coalesce; should simplify" );
1255 1258
1256 1259 // Pull from hi-degree list
1257 1260 uint prev = lo_lrg->_prev;
1258 1261 uint next = lo_lrg->_next;
1259 1262 if( prev ) lrgs(prev)._next = next;
1260 1263 else _hi_degree = next;
1261 1264 lrgs(next)._prev = prev;
1262 1265 // Jam him on the lo-degree list, despite his high degree.
1263 1266 // Maybe he'll get a color, and maybe he'll spill.
1264 1267 // Only Select() will know.
1265 1268 lrgs(lo_score)._at_risk = true;
1266 1269 _lo_degree = lo_score;
1267 1270 lo_lrg->_next = 0;
1268 1271
1269 1272 } // End of while not simplified everything
1270 1273
1271 1274 }
1272 1275
1273 1276 // Is 'reg' register legal for 'lrg'?
1274 1277 static bool is_legal_reg(LRG &lrg, OptoReg::Name reg, int chunk) {
1275 1278 if (reg >= chunk && reg < (chunk + RegMask::CHUNK_SIZE) &&
1276 1279 lrg.mask().Member(OptoReg::add(reg,-chunk))) {
1277 1280 // RA uses OptoReg which represent the highest element of a registers set.
1278 1281 // For example, vectorX (128bit) on x86 uses [XMM,XMMb,XMMc,XMMd] set
1279 1282 // in which XMMd is used by RA to represent such vectors. A double value
1280 1283 // uses [XMM,XMMb] pairs and XMMb is used by RA for it.
1281 1284 // The register mask uses largest bits set of overlapping register sets.
1282 1285 // On x86 with AVX it uses 8 bits for each XMM registers set.
1283 1286 //
1284 1287 // The 'lrg' already has cleared-to-set register mask (done in Select()
1285 1288 // before calling choose_color()). Passing mask.Member(reg) check above
1286 1289 // indicates that the size (num_regs) of 'reg' set is less or equal to
1287 1290 // 'lrg' set size.
1288 1291 // For set size 1 any register which is member of 'lrg' mask is legal.
1289 1292 if (lrg.num_regs()==1)
1290 1293 return true;
1291 1294 // For larger sets only an aligned register with the same set size is legal.
1292 1295 int mask = lrg.num_regs()-1;
1293 1296 if ((reg&mask) == mask)
1294 1297 return true;
1295 1298 }
1296 1299 return false;
1297 1300 }
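
Concretely, the alignment test at the end of is_legal_reg relies on num_regs being a power of two: since 'reg' names the highest element of a register set, an aligned set's highest element has all of the low bits set. A minimal sketch of that predicate:

// aligned_set(3, 4) -> true  (the set {0,1,2,3} ends on an aligned boundary)
// aligned_set(4, 4) -> false (the set {1,2,3,4} straddles two aligned groups)
static bool aligned_set(int reg, int num_regs) {
  if (num_regs == 1) return true;   // any single member register is legal
  int mask = num_regs - 1;          // power-of-two set size assumed
  return (reg & mask) == mask;      // highest element of an aligned set
}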
1298 1301
1299 1302 // Choose a color using the biasing heuristic
1300 1303 OptoReg::Name PhaseChaitin::bias_color( LRG &lrg, int chunk ) {
1301 1304
1302 1305 // Check for "at_risk" LRG's
1303 1306 uint risk_lrg = _lrg_map.find(lrg._risk_bias);
1304 1307 if( risk_lrg != 0 ) {
1305 1308 // Walk the colored neighbors of the "at_risk" candidate
1306 1309 // Choose a color which is both legal and already taken by a neighbor
1307 1310 // of the "at_risk" candidate in order to improve the chances of the
1308 1311 // "at_risk" candidate of coloring
1309 1312 IndexSetIterator elements(_ifg->neighbors(risk_lrg));
1310 1313 uint datum;
1311 1314 while ((datum = elements.next()) != 0) {
1312 1315 OptoReg::Name reg = lrgs(datum).reg();
1313 1316 // If this LRG's register is legal for us, choose it
1314 1317 if (is_legal_reg(lrg, reg, chunk))
1315 1318 return reg;
1316 1319 }
1317 1320 }
1318 1321
1319 1322 uint copy_lrg = _lrg_map.find(lrg._copy_bias);
1320 1323 if( copy_lrg != 0 ) {
1321 1324 // If he has a color,
1322 1325 if( !(*(_ifg->_yanked))[copy_lrg] ) {
1323 1326 OptoReg::Name reg = lrgs(copy_lrg).reg();
1324 1327 // And it is legal for you,
1325 1328 if (is_legal_reg(lrg, reg, chunk))
1326 1329 return reg;
1327 1330 } else if( chunk == 0 ) {
1328 1331 // Choose a color which is legal for him
1329 1332 RegMask tempmask = lrg.mask();
1330 1333 tempmask.AND(lrgs(copy_lrg).mask());
1331 1334 tempmask.clear_to_sets(lrg.num_regs());
1332 1335 OptoReg::Name reg = tempmask.find_first_set(lrg.num_regs());
1333 1336 if (OptoReg::is_valid(reg))
1334 1337 return reg;
1335 1338 }
1336 1339 }
1337 1340
1338 1341 // If no bias info exists, just go with the register selection ordering
1339 1342 if (lrg._is_vector || lrg.num_regs() == 2) {
1340 1343 // Find an aligned set
1341 1344 return OptoReg::add(lrg.mask().find_first_set(lrg.num_regs()),chunk);
1342 1345 }
1343 1346
1344 1347 // CNC - Fun hack. Alternate 1st and 2nd selection. Enables post-allocate
1345 1348 // copy removal to remove many more copies, by preventing a just-assigned
1346 1349 // register from being repeatedly assigned.
1347 1350 OptoReg::Name reg = lrg.mask().find_first_elem();
1348 1351 if( (++_alternate & 1) && OptoReg::is_valid(reg) ) {
1349 1352 // This 'Remove; find; Insert' idiom is an expensive way to find the
1350 1353 // SECOND element in the mask.
1351 1354 lrg.Remove(reg);
1352 1355 OptoReg::Name reg2 = lrg.mask().find_first_elem();
1353 1356 lrg.Insert(reg);
1354 1357 if( OptoReg::is_reg(reg2))
1355 1358 reg = reg2;
1356 1359 }
1357 1360 return OptoReg::add( reg, chunk );
1358 1361 }
1359 1362
1360 1363 // Choose a color in the current chunk
1361 1364 OptoReg::Name PhaseChaitin::choose_color( LRG &lrg, int chunk ) {
1362 1365 assert( C->in_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP-1)), "must not allocate stack0 (inside preserve area)");
1363 1366 assert(C->out_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP+0)), "must not allocate stack0 (inside preserve area)");
1364 1367
1365 1368 if( lrg.num_regs() == 1 || // Common Case
1366 1369 !lrg._fat_proj ) // Aligned+adjacent pairs ok
1367 1370 // Use a heuristic to "bias" the color choice
1368 1371 return bias_color(lrg, chunk);
1369 1372
1370 1373 assert(!lrg._is_vector, "should not be a vector here" );
1371 1374 assert( lrg.num_regs() >= 2, "dead live ranges do not color" );
1372 1375
1373 1376 // Fat-proj case or misaligned double argument.
1374 1377 assert(lrg.compute_mask_size() == lrg.num_regs() ||
1375 1378 lrg.num_regs() == 2,"fat projs exactly color" );
1376 1379 assert( !chunk, "always color in 1st chunk" );
1377 1380 // Return the highest element in the set.
1378 1381 return lrg.mask().find_last_elem();
1379 1382 }
1380 1383
1381 1384 // Select colors by re-inserting LRGs back into the IFG. LRGs are re-inserted
1382 1385 // in reverse order of removal. As long as nothing of hi-degree was yanked,
1383 1386 // everything going back is guaranteed a color. Select that color. If some
1384 1387 // hi-degree LRG cannot get a color then we record that we must spill.
1385 1388 uint PhaseChaitin::Select( ) {
1386 1389 uint spill_reg = LRG::SPILL_REG;
1387 1390 _max_reg = OptoReg::Name(0); // Past max register used
1388 1391 while( _simplified ) {
1389 1392 // Pull next LRG from the simplified list - in reverse order of removal
1390 1393 uint lidx = _simplified;
1391 1394 LRG *lrg = &lrgs(lidx);
1392 1395 _simplified = lrg->_next;
1393 1396
1394 1397
1395 1398 #ifndef PRODUCT
1396 1399 if (trace_spilling()) {
1397 1400 ttyLocker ttyl;
1398 1401 tty->print_cr("L%d selecting degree %d degrees_of_freedom %d", lidx, lrg->degree(),
1399 1402 lrg->degrees_of_freedom());
1400 1403 lrg->dump();
1401 1404 }
1402 1405 #endif
1403 1406
1404 1407 // Re-insert into the IFG
1405 1408 _ifg->re_insert(lidx);
1406 1409 if( !lrg->alive() ) continue;
1407 1410 // capture allstackedness flag before mask is hacked
1408 1411 const int is_allstack = lrg->mask().is_AllStack();
1409 1412
1410 1413 // Yeah, yeah, yeah, I know, I know. I can refactor this
1411 1414 // to avoid the GOTO, although the refactored code will not
1412 1415 // be much clearer. We arrive here IFF we have a stack-based
1413 1416 // live range that cannot color in the current chunk, and it
1414 1417 // has to move into the next free stack chunk.
1415 1418 int chunk = 0; // Current chunk is first chunk
1416 1419 retry_next_chunk:
1417 1420
1418 1421 // Remove neighbor colors
1419 1422 IndexSet *s = _ifg->neighbors(lidx);
1420 1423
1421 1424 debug_only(RegMask orig_mask = lrg->mask();)
1422 1425 IndexSetIterator elements(s);
1423 1426 uint neighbor;
1424 1427 while ((neighbor = elements.next()) != 0) {
1425 1428 // Note that neighbor might be a spill_reg. In this case, exclusion
1426 1429 // of its color will be a no-op, since the spill_reg chunk is in outer
1427 1430 // space. Also, if neighbor is in a different chunk, this exclusion
1428 1431 // will be a no-op. (Later on, if lrg runs out of possible colors in
1429 1432 // its chunk, a new chunk of color may be tried, in which case
1430 1433 // examination of neighbors is started again, at retry_next_chunk.)
1431 1434 LRG &nlrg = lrgs(neighbor);
1432 1435 OptoReg::Name nreg = nlrg.reg();
1433 1436 // Only subtract masks in the same chunk
1434 1437 if( nreg >= chunk && nreg < chunk + RegMask::CHUNK_SIZE ) {
1435 1438 #ifndef PRODUCT
1436 1439 uint size = lrg->mask().Size();
1437 1440 RegMask rm = lrg->mask();
1438 1441 #endif
1439 1442 lrg->SUBTRACT(nlrg.mask());
1440 1443 #ifndef PRODUCT
1441 1444 if (trace_spilling() && lrg->mask().Size() != size) {
1442 1445 ttyLocker ttyl;
1443 1446 tty->print("L%d ", lidx);
1444 1447 rm.dump();
1445 1448 tty->print(" intersected L%d ", neighbor);
1446 1449 nlrg.mask().dump();
1447 1450 tty->print(" removed ");
1448 1451 rm.SUBTRACT(lrg->mask());
1449 1452 rm.dump();
1450 1453 tty->print(" leaving ");
1451 1454 lrg->mask().dump();
1452 1455 tty->cr();
1453 1456 }
1454 1457 #endif
1455 1458 }
1456 1459 }
1457 1460 //assert(is_allstack == lrg->mask().is_AllStack(), "nbrs must not change AllStackedness");
1458 1461 // Aligned pairs need aligned masks
1459 1462 assert(!lrg->_is_vector || !lrg->_fat_proj, "sanity");
1460 1463 if (lrg->num_regs() > 1 && !lrg->_fat_proj) {
1461 1464 lrg->clear_to_sets();
1462 1465 }
1463 1466
1464 1467 // Check if a color is available and if so pick the color
1465 1468 OptoReg::Name reg = choose_color( *lrg, chunk );
1466 1469 #ifdef SPARC
1467 1470 debug_only(lrg->compute_set_mask_size());
1468 1471 assert(lrg->num_regs() < 2 || lrg->is_bound() || is_even(reg-1), "allocate all doubles aligned");
1469 1472 #endif
1470 1473
1471 1474 //---------------
1472 1475 // If we fail to color and the AllStack flag is set, trigger
1473 1476 // a chunk-rollover event
1474 1477 if(!OptoReg::is_valid(OptoReg::add(reg,-chunk)) && is_allstack) {
1475 1478 // Bump register mask up to next stack chunk
1476 1479 chunk += RegMask::CHUNK_SIZE;
1477 1480 lrg->Set_All();
1478 1481
1479 1482 goto retry_next_chunk;
1480 1483 }
1481 1484
1482 1485 //---------------
1483 1486 // Did we get a color?
1484 1487 else if( OptoReg::is_valid(reg)) {
1485 1488 #ifndef PRODUCT
1486 1489 RegMask avail_rm = lrg->mask();
1487 1490 #endif
1488 1491
1489 1492 // Record selected register
1490 1493 lrg->set_reg(reg);
1491 1494
1492 1495 if( reg >= _max_reg ) // Compute max register limit
1493 1496 _max_reg = OptoReg::add(reg,1);
1494 1497 // Fold reg back into normal space
1495 1498 reg = OptoReg::add(reg,-chunk);
1496 1499
1497 1500 // If the live range is not bound, then we actually had some choices
1498 1501 // to make. In this case, the mask has more bits in it than the colors
1499 1502 // chosen. Restrict the mask to just what was picked.
1500 1503 int n_regs = lrg->num_regs();
1501 1504 assert(!lrg->_is_vector || !lrg->_fat_proj, "sanity");
1502 1505 if (n_regs == 1 || !lrg->_fat_proj) {
1503 1506 assert(!lrg->_is_vector || n_regs <= RegMask::SlotsPerVecY, "sanity");
1504 1507 lrg->Clear(); // Clear the mask
1505 1508 lrg->Insert(reg); // Set regmask to match selected reg
1506 1509 // For vectors and pairs, also insert the low bit of the pair
1507 1510 for (int i = 1; i < n_regs; i++)
1508 1511 lrg->Insert(OptoReg::add(reg,-i));
1509 1512 lrg->set_mask_size(n_regs);
1510 1513 } else { // Else fatproj
1511 1514 // mask must be equal to fatproj bits, by definition
1512 1515 }
1513 1516 #ifndef PRODUCT
1514 1517 if (trace_spilling()) {
1515 1518 ttyLocker ttyl;
1516 1519 tty->print("L%d selected ", lidx);
1517 1520 lrg->mask().dump();
1518 1521 tty->print(" from ");
1519 1522 avail_rm.dump();
1520 1523 tty->cr();
1521 1524 }
1522 1525 #endif
1523 1526 // Note that reg is the highest-numbered register in the newly-bound mask.
1524 1527 } // end color available case
1525 1528
1526 1529 //---------------
1527 1530 // Live range is live and no colors available
1528 1531 else {
1529 1532 assert( lrg->alive(), "" );
1530 1533 assert( !lrg->_fat_proj || lrg->is_multidef() ||
1531 1534 lrg->_def->outcnt() > 0, "fat_proj cannot spill");
1532 1535 assert( !orig_mask.is_AllStack(), "All Stack does not spill" );
1533 1536
1534 1537 // Assign the special spillreg register
1535 1538 lrg->set_reg(OptoReg::Name(spill_reg++));
1536 1539 // Do not empty the regmask; leave mask_size lying around
1537 1540 // for use during Spilling
1538 1541 #ifndef PRODUCT
1539 1542 if( trace_spilling() ) {
1540 1543 ttyLocker ttyl;
1541 1544 tty->print("L%d spilling with neighbors: ", lidx);
1542 1545 s->dump();
1543 1546 debug_only(tty->print(" original mask: "));
1544 1547 debug_only(orig_mask.dump());
1545 1548 dump_lrg(lidx);
1546 1549 }
1547 1550 #endif
1548 1551 } // end spill case
1549 1552
1550 1553 }
1551 1554
1552 1555 return spill_reg-LRG::SPILL_REG; // Return number of spills
1553 1556 }
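Select()'s inner loop removes the colors of already-colored neighbors from the candidate's mask, tries to pick a color, and, when the mask is AllStack and the current chunk is exhausted, rolls over to the next chunk of stack slots. A compact sketch of that subtract-then-rollover idea with toy data structures (chunk size and the neighbor list are made-up values, not the allocator's real types):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Toy model: each chunk holds 8 colors. Neighbors colored in the
    // current chunk knock their color out of the mask; if nothing is left
    // and the value may live on the stack, retry in the next chunk with a
    // fresh mask, mimicking the retry_next_chunk loop above.
    static const int kChunkSize = 8;

    static int pick_color(const std::vector<int>& neighbor_colors, bool all_stack) {
      for (int chunk = 0; ; chunk += kChunkSize) {
        uint32_t mask = (1u << kChunkSize) - 1;          // Set_All() for this chunk
        for (int c : neighbor_colors) {
          if (c >= chunk && c < chunk + kChunkSize)
            mask &= ~(1u << (c - chunk));                // SUBTRACT neighbor's color
        }
        for (int i = 0; i < kChunkSize; i++)
          if (mask & (1u << i)) return chunk + i;        // a color is available
        if (!all_stack) return -1;                       // no color: must spill
        // else fall through and try the next stack chunk
      }
    }

    int main() {
      std::vector<int> neighbors = {0, 1, 2, 3, 4, 5, 6, 7};    // chunk 0 is full
      printf("picked color %d\n", pick_color(neighbors, true)); // rolls to chunk 1
      return 0;
    }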
1554 1557
1555 1558 // Copy 'was_spilled'-edness from the source Node to the dst Node.
1556 1559 void PhaseChaitin::copy_was_spilled( Node *src, Node *dst ) {
1557 1560 if( _spilled_once.test(src->_idx) ) {
1558 1561 _spilled_once.set(dst->_idx);
1559 1562 lrgs(_lrg_map.find(dst))._was_spilled1 = 1;
1560 1563 if( _spilled_twice.test(src->_idx) ) {
1561 1564 _spilled_twice.set(dst->_idx);
1562 1565 lrgs(_lrg_map.find(dst))._was_spilled2 = 1;
1563 1566 }
1564 1567 }
1565 1568 }
1566 1569
1567 1570 // Set the 'spilled_once' or 'spilled_twice' flag on a node.
1568 1571 void PhaseChaitin::set_was_spilled( Node *n ) {
1569 1572 if( _spilled_once.test_set(n->_idx) )
1570 1573 _spilled_twice.set(n->_idx);
1571 1574 }
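set_was_spilled relies on test_set semantics: the first call for a node records "spilled once", and a later call promotes it to "spilled twice". A tiny sketch of that promotion using std::set in place of the bit sets (the node id is hypothetical):

    #include <cassert>
    #include <set>

    struct SpillFlags {
      std::set<unsigned> once, twice;
      // Mirrors: if (_spilled_once.test_set(idx)) _spilled_twice.set(idx);
      void set_was_spilled(unsigned idx) {
        if (!once.insert(idx).second)   // already present: "test_set" was true
          twice.insert(idx);
      }
    };

    int main() {
      SpillFlags f;
      f.set_was_spilled(42);            // first spill of node 42
      assert(f.once.count(42) && !f.twice.count(42));
      f.set_was_spilled(42);            // spilled again
      assert(f.twice.count(42));
      return 0;
    }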
1572 1575
1573 1576 // Convert Ideal spill instructions into proper FramePtr + offset Loads and
1574 1577 // Stores. Use-def chains are NOT preserved, but Node->LRG->reg maps are.
1575 1578 void PhaseChaitin::fixup_spills() {
1576 1579 // This function does only cisc spill work.
1577 1580 if( !UseCISCSpill ) return;
1578 1581
1579 1582 NOT_PRODUCT( Compile::TracePhase t3("fixupSpills", &_t_fixupSpills, TimeCompiler); )
1580 1583
1581 1584 // Grab the Frame Pointer
1582 1585 Node *fp = _cfg.get_root_block()->head()->in(1)->in(TypeFunc::FramePtr);
1583 1586
1584 1587 // For all blocks
1585 1588 for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
1586 1589 Block* block = _cfg.get_block(i);
1587 1590
1588 1591 // For all instructions in block
1589 1592 uint last_inst = block->end_idx();
1590 1593 for (uint j = 1; j <= last_inst; j++) {
1591 1594 Node* n = block->get_node(j);
1592 1595
1593 1596 // Dead instruction???
1594 1597 assert( n->outcnt() != 0 ||// Nothing dead after post alloc
1595 1598 C->top() == n || // Or the random TOP node
1596 1599 n->is_Proj(), // Or a fat-proj kill node
1597 1600 "No dead instructions after post-alloc" );
1598 1601
1599 1602 int inp = n->cisc_operand();
1600 1603 if( inp != AdlcVMDeps::Not_cisc_spillable ) {
1601 1604 // Convert operand number to edge index number
1602 1605 MachNode *mach = n->as_Mach();
1603 1606 inp = mach->operand_index(inp);
1604 1607 Node *src = n->in(inp); // Value to load or store
1605 1608 LRG &lrg_cisc = lrgs(_lrg_map.find_const(src));
1606 1609 OptoReg::Name src_reg = lrg_cisc.reg();
1607 1610 // Doubles record the HIGH register of an adjacent pair.
1608 1611 src_reg = OptoReg::add(src_reg,1-lrg_cisc.num_regs());
1609 1612 if( OptoReg::is_stack(src_reg) ) { // If input is on stack
1610 1613 // This is a CISC Spill, get stack offset and construct new node
1611 1614 #ifndef PRODUCT
1612 1615 if( TraceCISCSpill ) {
1613 1616 tty->print(" reg-instr: ");
1614 1617 n->dump();
1615 1618 }
1616 1619 #endif
1617 1620 int stk_offset = reg2offset(src_reg);
1618 1621 // Bailout if we might exceed node limit when spilling this instruction
1619 1622 C->check_node_count(0, "out of nodes fixing spills");
1620 1623 if (C->failing()) return;
1621 1624 // Transform node
1622 1625 MachNode *cisc = mach->cisc_version(stk_offset, C)->as_Mach();
1623 1626 cisc->set_req(inp,fp); // Base register is frame pointer
1624 1627 if( cisc->oper_input_base() > 1 && mach->oper_input_base() <= 1 ) {
1625 1628 assert( cisc->oper_input_base() == 2, "Only adding one edge");
1626 1629 cisc->ins_req(1,src); // Requires a memory edge
1627 1630 }
1628 1631 block->map_node(cisc, j); // Insert into basic block
1629 1632 n->subsume_by(cisc, C); // Correct graph
1630 1633 //
1631 1634 ++_used_cisc_instructions;
1632 1635 #ifndef PRODUCT
1633 1636 if( TraceCISCSpill ) {
1634 1637 tty->print(" cisc-instr: ");
1635 1638 cisc->dump();
1636 1639 }
1637 1640 #endif
1638 1641 } else {
1639 1642 #ifndef PRODUCT
1640 1643 if( TraceCISCSpill ) {
1641 1644 tty->print(" using reg-instr: ");
1642 1645 n->dump();
1643 1646 }
1644 1647 #endif
1645 1648 ++_unused_cisc_instructions; // input can be on stack
1646 1649 }
1647 1650 }
1648 1651
1649 1652 } // End of for all instructions
1650 1653
1651 1654 } // End of for all blocks
1652 1655 }
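The CISC-spill rewrite above replaces a register input whose live range ended up on the stack with a memory form of the same instruction addressed off the frame pointer, instead of emitting a separate reload. A schematic sketch of that decision with a toy instruction record (register numbering, the offset formula, and the struct are simplified assumptions):

    #include <cstdio>

    // Toy stand-ins: names 0..31 are machine registers, higher names are
    // stack slots; reg2offset converts a stack "register" to a frame offset.
    static bool is_stack(int reg)   { return reg >= 32; }
    static int  reg2offset(int reg) { return (reg - 32) * 8; }

    struct Instr {
      const char* op;
      int         src_reg;     // register assigned to the value being read
      bool        uses_memory; // true after the CISC rewrite
      int         fp_offset;
    };

    // If the source landed on the stack, fold the load into the instruction
    // itself ([FP + offset]) rather than reloading into a register first.
    static void maybe_cisc_spill(Instr& in) {
      if (is_stack(in.src_reg)) {
        in.uses_memory = true;
        in.fp_offset   = reg2offset(in.src_reg);
      }
    }

    int main() {
      Instr add = {"addl", 35, false, 0};   // source spilled to stack slot 3
      maybe_cisc_spill(add);
      printf("%s uses [FP+%d]: %s\n", add.op, add.fp_offset,
             add.uses_memory ? "yes" : "no");
      return 0;
    }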
1653 1656
1654 1657 // Helper for stretch_base_pointer_live_ranges() below; recursively discover the base Node for a
1655 1658 // given derived Node. Easy for AddP-related machine nodes, but needs
1656 1659 // to be recursive for derived Phis.
1657 1660 Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derived, uint &maxlrg ) {
1658 1661 // See if already computed; if so return it
1659 1662 if( derived_base_map[derived->_idx] )
1660 1663 return derived_base_map[derived->_idx];
1661 1664
1662 1665 // See if this happens to be a base.
1663 1666 // NOTE: we use TypePtr instead of TypeOopPtr because we can have
1664 1667 // pointers derived from NULL! These are always along paths that
1665 1668 // can't happen at run-time but the optimizer cannot deduce it so
1666 1669 // we have to handle it gracefully.
1667 1670 assert(!derived->bottom_type()->isa_narrowoop() ||
1668 1671 derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
1669 1672 const TypePtr *tj = derived->bottom_type()->isa_ptr();
1670 1673 // If it's an OOP with a non-zero offset, then it is derived.
1671 1674 if( tj == NULL || tj->_offset == 0 ) {
1672 1675 derived_base_map[derived->_idx] = derived;
1673 1676 return derived;
1674 1677 }
1675 1678 // Derived is NULL+offset? Base is NULL!
1676 1679 if( derived->is_Con() ) {
1677 1680 Node *base = _matcher.mach_null();
1678 1681 assert(base != NULL, "sanity");
1679 1682 if (base->in(0) == NULL) {
1680 1683 // Initialize it once and make it shared:
1681 1684 // set control to _root and place it into Start block
1682 1685 // (where top() node is placed).
1683 1686 base->init_req(0, _cfg.get_root_node());
1684 1687 Block *startb = _cfg.get_block_for_node(C->top());
1685 1688 uint node_pos = startb->find_node(C->top());
1686 1689 startb->insert_node(base, node_pos);
1687 1690 _cfg.map_node_to_block(base, startb);
1688 1691 assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
1689 1692
1690 1693 // The loadConP0 might have projection nodes depending on architecture
1691 1694 // Add the projection nodes to the CFG
1692 1695 for (DUIterator_Fast imax, i = base->fast_outs(imax); i < imax; i++) {
1693 1696 Node* use = base->fast_out(i);
1694 1697 if (use->is_MachProj()) {
1695 1698 startb->insert_node(use, ++node_pos);
1696 1699 _cfg.map_node_to_block(use, startb);
1697 1700 new_lrg(use, maxlrg++);
1698 1701 }
1699 1702 }
1700 1703 }
1701 1704 if (_lrg_map.live_range_id(base) == 0) {
1702 1705 new_lrg(base, maxlrg++);
1703 1706 }
1704 1707 assert(base->in(0) == _cfg.get_root_node() && _cfg.get_block_for_node(base) == _cfg.get_block_for_node(C->top()), "base NULL should be shared");
1705 1708 derived_base_map[derived->_idx] = base;
1706 1709 return base;
1707 1710 }
1708 1711
1709 1712 // Check for AddP-related opcodes
1710 1713 if (!derived->is_Phi()) {
1711 1714 assert(derived->as_Mach()->ideal_Opcode() == Op_AddP, err_msg_res("but is: %s", derived->Name()));
1712 1715 Node *base = derived->in(AddPNode::Base);
1713 1716 derived_base_map[derived->_idx] = base;
1714 1717 return base;
1715 1718 }
1716 1719
1717 1720 // Recursively find bases for Phis.
1718 1721 // First check to see if we can avoid a base Phi here.
1719 1722 Node *base = find_base_for_derived( derived_base_map, derived->in(1),maxlrg);
1720 1723 uint i;
1721 1724 for( i = 2; i < derived->req(); i++ )
1722 1725 if( base != find_base_for_derived( derived_base_map,derived->in(i),maxlrg))
1723 1726 break;
1724 1727 // Went to the end without finding any different bases?
1725 1728 if( i == derived->req() ) { // No need for a base Phi here
1726 1729 derived_base_map[derived->_idx] = base;
1727 1730 return base;
1728 1731 }
1729 1732
1730 1733 // Now we see we need a base-Phi here to merge the bases
1731 1734 const Type *t = base->bottom_type();
1732 1735 base = new (C) PhiNode( derived->in(0), t );
1733 1736 for( i = 1; i < derived->req(); i++ ) {
1734 1737 base->init_req(i, find_base_for_derived(derived_base_map, derived->in(i), maxlrg));
1735 1738 t = t->meet(base->in(i)->bottom_type());
1736 1739 }
1737 1740 base->as_Phi()->set_type(t);
1738 1741
1739 1742 // Search the current block for an existing base-Phi
1740 1743 Block *b = _cfg.get_block_for_node(derived);
1741 1744 for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi
1742 1745 Node *phi = b->get_node(i);
1743 1746 if( !phi->is_Phi() ) { // Found end of Phis with no match?
1744 1747 b->insert_node(base, i); // Must insert created Phi here as base
1745 1748 _cfg.map_node_to_block(base, b);
1746 1749 new_lrg(base,maxlrg++);
1747 1750 break;
1748 1751 }
1749 1752 // See if Phi matches.
1750 1753 uint j;
1751 1754 for( j = 1; j < base->req(); j++ )
1752 1755 if( phi->in(j) != base->in(j) &&
1753 1756 !(phi->in(j)->is_Con() && base->in(j)->is_Con()) ) // allow different NULLs
1754 1757 break;
1755 1758 if( j == base->req() ) { // All inputs match?
1756 1759 base = phi; // Then use existing 'phi' and drop 'base'
1757 1760 break;
1758 1761 }
1759 1762 }
1760 1763
1761 1764
1762 1765 // Cache info for later passes
1763 1766 derived_base_map[derived->_idx] = base;
1764 1767 return base;
1765 1768 }
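find_base_for_derived memoizes its per-node answer, treats zero-offset pointers as their own base, follows an AddP's Base input, and only needs a base Phi when a Phi's inputs disagree about their bases. A stripped-down sketch of the same recursion over a toy node graph (the node struct, ids, and offsets are invented for illustration; the real code builds or reuses a base Phi where the sketch only reports the disagreement):

    #include <cstdio>
    #include <map>
    #include <vector>

    struct N {
      int id;
      int offset;                 // 0 => already a base pointer
      std::vector<N*> inputs;     // AddP stand-in: inputs[0] is the base;
                                  // Phi stand-in: all inputs are merged
      bool is_phi;
    };

    static N* find_base(std::map<int, N*>& memo, N* derived) {
      auto it = memo.find(derived->id);
      if (it != memo.end()) return it->second;            // already computed
      N* base;
      if (derived->offset == 0) {
        base = derived;                                   // it is its own base
      } else if (!derived->is_phi) {
        base = find_base(memo, derived->inputs[0]);       // follow AddP's Base
      } else {
        base = find_base(memo, derived->inputs[0]);       // hope for one shared base
        for (size_t i = 1; i < derived->inputs.size(); i++) {
          if (find_base(memo, derived->inputs[i]) != base) {
            printf("would need a base Phi at N%d\n", derived->id);
            break;
          }
        }
      }
      memo[derived->id] = base;
      return base;
    }

    int main() {
      N obj  = {1, 0,  {},     false};      // base object
      N addp = {2, 16, {&obj}, false};      // obj + 16: a derived pointer
      std::map<int, N*> memo;
      printf("base of N%d is N%d\n", addp.id, find_base(memo, &addp)->id);
      return 0;
    }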
1766 1769
1767 1770 // At each Safepoint, insert extra debug edges for each pair of derived value/
1768 1771 // base pointer that is live across the Safepoint for oopmap building. The
1769 1772 // edge pairs get added in after sfpt->jvmtail()->oopoff(), but are in the
1770 1773 // required edge set.
1771 1774 bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) {
1772 1775 int must_recompute_live = false;
1773 1776 uint maxlrg = _lrg_map.max_lrg_id();
1774 1777 Node **derived_base_map = (Node**)a->Amalloc(sizeof(Node*)*C->unique());
1775 1778 memset( derived_base_map, 0, sizeof(Node*)*C->unique() );
1776 1779
1777 1780 // For all blocks in RPO do...
1778 1781 for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
1779 1782 Block* block = _cfg.get_block(i);
1780 1783 // Note use of deep-copy constructor. I cannot hammer the original
1781 1784 // liveout bits, because they are needed by the following coalesce pass.
1782 1785 IndexSet liveout(_live->live(block));
1783 1786
1784 1787 for (uint j = block->end_idx() + 1; j > 1; j--) {
1785 1788 Node* n = block->get_node(j - 1);
1786 1789
1787 1790 // Pre-split compares of loop-phis. Loop-phis form a cycle we would
1788 1791 // like to see in the same register. Compare uses the loop-phi and so
1789 1792 // extends its live range BUT cannot be part of the cycle. If this
1790 1793 // extended live range overlaps with the update of the loop-phi value
1791 1794 // we need both alive at the same time -- which requires at least 1
1792 1795 // copy. But because Intel has only 2-address registers we end up with
1793 1796 // at least 2 copies, one before the loop-phi update instruction and
1794 1797 // one after. Instead we split the input to the compare just after the
1795 1798 // phi.
1796 1799 if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CmpI ) {
1797 1800 Node *phi = n->in(1);
1798 1801 if( phi->is_Phi() && phi->as_Phi()->region()->is_Loop() ) {
1799 1802 Block *phi_block = _cfg.get_block_for_node(phi);
1800 1803 if (_cfg.get_block_for_node(phi_block->pred(2)) == block) {
1801 1804 const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
1802 1805 Node *spill = new (C) MachSpillCopyNode( phi, *mask, *mask );
1803 1806 insert_proj( phi_block, 1, spill, maxlrg++ );
1804 1807 n->set_req(1,spill);
1805 1808 must_recompute_live = true;
1806 1809 }
1807 1810 }
1808 1811 }
1809 1812
1810 1813 // Get value being defined
1811 1814 uint lidx = _lrg_map.live_range_id(n);
1812 1815 // Ignore the occasional brand-new live range
1813 1816 if (lidx && lidx < _lrg_map.max_lrg_id()) {
1814 1817 // Remove from live-out set
1815 1818 liveout.remove(lidx);
1816 1819
1817 1820 // Copies do not define a new value and so do not interfere.
1818 1821 // Remove the copies source from the liveout set before interfering.
1819 1822 uint idx = n->is_Copy();
1820 1823 if (idx) {
1821 1824 liveout.remove(_lrg_map.live_range_id(n->in(idx)));
1822 1825 }
1823 1826 }
1824 1827
1825 1828 // Found a safepoint?
1826 1829 JVMState *jvms = n->jvms();
1827 1830 if( jvms ) {
1828 1831 // Now scan for a live derived pointer
1829 1832 IndexSetIterator elements(&liveout);
1830 1833 uint neighbor;
1831 1834 while ((neighbor = elements.next()) != 0) {
1832 1835 // Find reaching DEF for base and derived values
1833 1836 // This works because we are still in SSA during this call.
1834 1837 Node *derived = lrgs(neighbor)._def;
1835 1838 const TypePtr *tj = derived->bottom_type()->isa_ptr();
1836 1839 assert(!derived->bottom_type()->isa_narrowoop() ||
1837 1840 derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
1838 1841 // If it's an OOP with a non-zero offset, then it is derived.
1839 1842 if( tj && tj->_offset != 0 && tj->isa_oop_ptr() ) {
1840 1843 Node *base = find_base_for_derived(derived_base_map, derived, maxlrg);
1841 1844 assert(base->_idx < _lrg_map.size(), "");
1842 1845 // Add reaching DEFs of derived pointer and base pointer as a
1843 1846 // pair of inputs
1844 1847 n->add_req(derived);
1845 1848 n->add_req(base);
1846 1849
1847 1850 // See if the base pointer is already live to this point.
1848 1851 // Since I'm working on the SSA form, live-ness amounts to
1849 1852 // reaching def's. So if I find the base's live range then
1850 1853 // I know the base's def reaches here.
1851 1854 if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
1852 1855 !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
1853 1856 (_lrg_map.live_range_id(base) > 0) && // not a constant
1854 1857 _cfg.get_block_for_node(base) != block) { // base not def'd in blk)
1855 1858 // Base pointer is not currently live. Since I stretched
1856 1859 // the base pointer to here and it crosses basic-block
1857 1860 // boundaries, the global live info is now incorrect.
1858 1861 // Recompute live.
1859 1862 must_recompute_live = true;
1860 1863 } // End of if base pointer is not live to debug info
1861 1864 }
1862 1865 } // End of scan all live data for derived ptrs crossing GC point
1863 1866 } // End of if found a GC point
1864 1867
1865 1868 // Make all inputs live
1866 1869 if (!n->is_Phi()) { // Phi function uses come from prior block
1867 1870 for (uint k = 1; k < n->req(); k++) {
1868 1871 uint lidx = _lrg_map.live_range_id(n->in(k));
1869 1872 if (lidx < _lrg_map.max_lrg_id()) {
1870 1873 liveout.insert(lidx);
1871 1874 }
1872 1875 }
1873 1876 }
1874 1877
1875 1878 } // End of forall instructions in block
1876 1879 liveout.clear(); // Free the memory used by liveout.
1877 1880
1878 1881 } // End of forall blocks
1879 1882 _lrg_map.set_max_lrg_id(maxlrg);
1880 1883
1881 1884 // If I created a new live range I need to recompute live
1882 1885 if (maxlrg != _ifg->_maxlrg) {
1883 1886 must_recompute_live = true;
1884 1887 }
1885 1888
1886 1889 return must_recompute_live != 0;
1887 1890 }
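The safepoint handling above pairs every live derived pointer with its base as extra debug inputs, so oopmap building can later rebase the derived value after GC moves the base. A very small sketch of that pairing step (node ids and the record type are hypothetical):

    #include <cstdio>
    #include <utility>
    #include <vector>

    // Toy safepoint record: for every derived pointer live across the
    // safepoint, remember the (derived, base) pair, echoing
    // n->add_req(derived); n->add_req(base); in the code above.
    struct Safepoint { std::vector<std::pair<int,int>> debug_pairs; };

    int main() {
      // Hypothetical live derived pointers and the bases found for them.
      std::vector<std::pair<int,int>> live_derived = {{7, 3}, {9, 3}};
      Safepoint sfpt;
      for (const auto& p : live_derived)
        sfpt.debug_pairs.push_back(p);
      printf("safepoint carries %zu derived/base pairs\n", sfpt.debug_pairs.size());
      return 0;
    }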
1888 1891
1889 1892 // Extend the node to LRG mapping
1890 1893
1891 1894 void PhaseChaitin::add_reference(const Node *node, const Node *old_node) {
1892 1895 _lrg_map.extend(node->_idx, _lrg_map.live_range_id(old_node));
1893 1896 }
1894 1897
1895 1898 #ifndef PRODUCT
1896 1899 void PhaseChaitin::dump(const Node *n) const {
1897 1900 uint r = (n->_idx < _lrg_map.size()) ? _lrg_map.find_const(n) : 0;
1898 1901 tty->print("L%d",r);
1899 1902 if (r && n->Opcode() != Op_Phi) {
1900 1903 if( _node_regs ) { // Got a post-allocation copy of allocation?
1901 1904 tty->print("[");
1902 1905 OptoReg::Name second = get_reg_second(n);
1903 1906 if( OptoReg::is_valid(second) ) {
1904 1907 if( OptoReg::is_reg(second) )
1905 1908 tty->print("%s:",Matcher::regName[second]);
1906 1909 else
1907 1910 tty->print("%s+%d:",OptoReg::regname(OptoReg::c_frame_pointer), reg2offset_unchecked(second));
1908 1911 }
1909 1912 OptoReg::Name first = get_reg_first(n);
1910 1913 if( OptoReg::is_reg(first) )
1911 1914 tty->print("%s]",Matcher::regName[first]);
1912 1915 else
1913 1916 tty->print("%s+%d]",OptoReg::regname(OptoReg::c_frame_pointer), reg2offset_unchecked(first));
1914 1917 } else
1915 1918 n->out_RegMask().dump();
1916 1919 }
1917 1920 tty->print("/N%d\t",n->_idx);
1918 1921 tty->print("%s === ", n->Name());
1919 1922 uint k;
1920 1923 for (k = 0; k < n->req(); k++) {
1921 1924 Node *m = n->in(k);
1922 1925 if (!m) {
1923 1926 tty->print("_ ");
1924 1927 }
1925 1928 else {
1926 1929 uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
1927 1930 tty->print("L%d",r);
1928 1931 // Data MultiNode's can have projections with no real registers.
1929 1932 // Don't die while dumping them.
1930 1933 int op = n->Opcode();
1931 1934 if( r && op != Op_Phi && op != Op_Proj && op != Op_SCMemProj) {
1932 1935 if( _node_regs ) {
1933 1936 tty->print("[");
1934 1937 OptoReg::Name second = get_reg_second(n->in(k));
1935 1938 if( OptoReg::is_valid(second) ) {
1936 1939 if( OptoReg::is_reg(second) )
1937 1940 tty->print("%s:",Matcher::regName[second]);
1938 1941 else
1939 1942 tty->print("%s+%d:",OptoReg::regname(OptoReg::c_frame_pointer),
1940 1943 reg2offset_unchecked(second));
1941 1944 }
1942 1945 OptoReg::Name first = get_reg_first(n->in(k));
1943 1946 if( OptoReg::is_reg(first) )
1944 1947 tty->print("%s]",Matcher::regName[first]);
1945 1948 else
1946 1949 tty->print("%s+%d]",OptoReg::regname(OptoReg::c_frame_pointer),
1947 1950 reg2offset_unchecked(first));
1948 1951 } else
1949 1952 n->in_RegMask(k).dump();
1950 1953 }
1951 1954 tty->print("/N%d ",m->_idx);
1952 1955 }
1953 1956 }
1954 1957 if( k < n->len() && n->in(k) ) tty->print("| ");
1955 1958 for( ; k < n->len(); k++ ) {
1956 1959 Node *m = n->in(k);
1957 1960 if(!m) {
1958 1961 break;
1959 1962 }
1960 1963 uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
1961 1964 tty->print("L%d",r);
1962 1965 tty->print("/N%d ",m->_idx);
1963 1966 }
1964 1967 if( n->is_Mach() ) n->as_Mach()->dump_spec(tty);
1965 1968 else n->dump_spec(tty);
1966 1969 if( _spilled_once.test(n->_idx ) ) {
1967 1970 tty->print(" Spill_1");
1968 1971 if( _spilled_twice.test(n->_idx ) )
1969 1972 tty->print(" Spill_2");
1970 1973 }
1971 1974 tty->print("\n");
1972 1975 }
1973 1976
1974 1977 void PhaseChaitin::dump(const Block *b) const {
1975 1978 b->dump_head(&_cfg);
1976 1979
1977 1980 // For all instructions
1978 1981 for( uint j = 0; j < b->number_of_nodes(); j++ )
1979 1982 dump(b->get_node(j));
1980 1983 // Print live-out info at end of block
1981 1984 if( _live ) {
1982 1985 tty->print("Liveout: ");
1983 1986 IndexSet *live = _live->live(b);
1984 1987 IndexSetIterator elements(live);
1985 1988 tty->print("{");
1986 1989 uint i;
1987 1990 while ((i = elements.next()) != 0) {
1988 1991 tty->print("L%d ", _lrg_map.find_const(i));
1989 1992 }
1990 1993 tty->print_cr("}");
1991 1994 }
1992 1995 tty->print("\n");
1993 1996 }
1994 1997
1995 1998 void PhaseChaitin::dump() const {
1996 1999 tty->print( "--- Chaitin -- argsize: %d framesize: %d ---\n",
1997 2000 _matcher._new_SP, _framesize );
1998 2001
1999 2002 // For all blocks
2000 2003 for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
2001 2004 dump(_cfg.get_block(i));
2002 2005 }
2003 2006 // End of per-block dump
2004 2007 tty->print("\n");
2005 2008
2006 2009 if (!_ifg) {
2007 2010 tty->print("(No IFG.)\n");
2008 2011 return;
2009 2012 }
2010 2013
2011 2014 // Dump LRG array
2012 2015 tty->print("--- Live RanGe Array ---\n");
2013 2016 for (uint i2 = 1; i2 < _lrg_map.max_lrg_id(); i2++) {
2014 2017 tty->print("L%d: ",i2);
2015 2018 if (i2 < _ifg->_maxlrg) {
2016 2019 lrgs(i2).dump();
2017 2020 }
2018 2021 else {
2019 2022 tty->print_cr("new LRG");
2020 2023 }
2021 2024 }
2022 2025 tty->cr();
2023 2026
2024 2027 // Dump lo-degree list
2025 2028 tty->print("Lo degree: ");
2026 2029 for(uint i3 = _lo_degree; i3; i3 = lrgs(i3)._next )
2027 2030 tty->print("L%d ",i3);
2028 2031 tty->cr();
2029 2032
2030 2033 // Dump lo-stk-degree list
2031 2034 tty->print("Lo stk degree: ");
2032 2035 for(uint i4 = _lo_stk_degree; i4; i4 = lrgs(i4)._next )
2033 2036 tty->print("L%d ",i4);
2034 2037 tty->cr();
2035 2038
2036 2039 // Dump hi-degree list
2037 2040 tty->print("Hi degree: ");
2038 2041 for(uint i5 = _hi_degree; i5; i5 = lrgs(i5)._next )
2039 2042 tty->print("L%d ",i5);
2040 2043 tty->cr();
2041 2044 }
2042 2045
2043 2046 void PhaseChaitin::dump_degree_lists() const {
2044 2047 // Dump lo-degree list
2045 2048 tty->print("Lo degree: ");
2046 2049 for( uint i = _lo_degree; i; i = lrgs(i)._next )
2047 2050 tty->print("L%d ",i);
2048 2051 tty->cr();
2049 2052
2050 2053 // Dump lo-stk-degree list
2051 2054 tty->print("Lo stk degree: ");
2052 2055 for(uint i2 = _lo_stk_degree; i2; i2 = lrgs(i2)._next )
2053 2056 tty->print("L%d ",i2);
2054 2057 tty->cr();
2055 2058
2056 2059 // Dump hi-degree list
2057 2060 tty->print("Hi degree: ");
2058 2061 for(uint i3 = _hi_degree; i3; i3 = lrgs(i3)._next )
2059 2062 tty->print("L%d ",i3);
2060 2063 tty->cr();
2061 2064 }
2062 2065
2063 2066 void PhaseChaitin::dump_simplified() const {
2064 2067 tty->print("Simplified: ");
2065 2068 for( uint i = _simplified; i; i = lrgs(i)._next )
2066 2069 tty->print("L%d ",i);
2067 2070 tty->cr();
2068 2071 }
2069 2072
2070 2073 static char *print_reg( OptoReg::Name reg, const PhaseChaitin *pc, char *buf ) {
2071 2074 if ((int)reg < 0)
2072 2075 sprintf(buf, "<OptoReg::%d>", (int)reg);
2073 2076 else if (OptoReg::is_reg(reg))
2074 2077 strcpy(buf, Matcher::regName[reg]);
2075 2078 else
2076 2079 sprintf(buf,"%s + #%d",OptoReg::regname(OptoReg::c_frame_pointer),
2077 2080 pc->reg2offset(reg));
2078 2081 return buf+strlen(buf);
2079 2082 }
2080 2083
2081 2084 // Dump a register name into a buffer. Be intelligent if we get called
2082 2085 // before allocation is complete.
2083 2086 char *PhaseChaitin::dump_register( const Node *n, char *buf ) const {
2084 2087 if( !this ) { // Not got anything?
2085 2088 sprintf(buf,"N%d",n->_idx); // Then use Node index
2086 2089 } else if( _node_regs ) {
2087 2090 // Post allocation, use direct mappings, no LRG info available
2088 2091 print_reg( get_reg_first(n), this, buf );
2089 2092 } else {
2090 2093 uint lidx = _lrg_map.find_const(n); // Grab LRG number
2091 2094 if( !_ifg ) {
2092 2095 sprintf(buf,"L%d",lidx); // No register binding yet
2093 2096 } else if( !lidx ) { // Special, not allocated value
2094 2097 strcpy(buf,"Special");
2095 2098 } else {
2096 2099 if (lrgs(lidx)._is_vector) {
2097 2100 if (lrgs(lidx).mask().is_bound_set(lrgs(lidx).num_regs()))
2098 2101 print_reg( lrgs(lidx).reg(), this, buf ); // a bound machine register
2099 2102 else
2100 2103 sprintf(buf,"L%d",lidx); // No register binding yet
2101 2104 } else if( (lrgs(lidx).num_regs() == 1)
2102 2105 ? lrgs(lidx).mask().is_bound1()
2103 2106 : lrgs(lidx).mask().is_bound_pair() ) {
2104 2107 // Hah! We have a bound machine register
2105 2108 print_reg( lrgs(lidx).reg(), this, buf );
2106 2109 } else {
2107 2110 sprintf(buf,"L%d",lidx); // No register binding yet
2108 2111 }
2109 2112 }
2110 2113 }
2111 2114 return buf+strlen(buf);
2112 2115 }
2113 2116
2114 2117 void PhaseChaitin::dump_for_spill_split_recycle() const {
2115 2118 if( WizardMode && (PrintCompilation || PrintOpto) ) {
2116 2119 // Display which live ranges need to be split and the allocator's state
2117 2120 tty->print_cr("Graph-Coloring Iteration %d will split the following live ranges", _trip_cnt);
2118 2121 for (uint bidx = 1; bidx < _lrg_map.max_lrg_id(); bidx++) {
2119 2122 if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) {
2120 2123 tty->print("L%d: ", bidx);
2121 2124 lrgs(bidx).dump();
2122 2125 }
2123 2126 }
2124 2127 tty->cr();
2125 2128 dump();
2126 2129 }
2127 2130 }
2128 2131
2129 2132 void PhaseChaitin::dump_frame() const {
2130 2133 const char *fp = OptoReg::regname(OptoReg::c_frame_pointer);
2131 2134 const TypeTuple *domain = C->tf()->domain();
2132 2135 const int argcnt = domain->cnt() - TypeFunc::Parms;
2133 2136
2134 2137 // Incoming arguments in registers dump
2135 2138 for( int k = 0; k < argcnt; k++ ) {
2136 2139 OptoReg::Name parmreg = _matcher._parm_regs[k].first();
2137 2140 if( OptoReg::is_reg(parmreg)) {
2138 2141 const char *reg_name = OptoReg::regname(parmreg);
2139 2142 tty->print("#r%3.3d %s", parmreg, reg_name);
2140 2143 parmreg = _matcher._parm_regs[k].second();
2141 2144 if( OptoReg::is_reg(parmreg)) {
2142 2145 tty->print(":%s", OptoReg::regname(parmreg));
2143 2146 }
2144 2147 tty->print(" : parm %d: ", k);
2145 2148 domain->field_at(k + TypeFunc::Parms)->dump();
2146 2149 tty->cr();
2147 2150 }
2148 2151 }
2149 2152
2150 2153 // Check for un-owned padding above incoming args
2151 2154 OptoReg::Name reg = _matcher._new_SP;
2152 2155 if( reg > _matcher._in_arg_limit ) {
2153 2156 reg = OptoReg::add(reg, -1);
2154 2157 tty->print_cr("#r%3.3d %s+%2d: pad0, owned by CALLER", reg, fp, reg2offset_unchecked(reg));
2155 2158 }
2156 2159
2157 2160 // Incoming argument area dump
2158 2161 OptoReg::Name begin_in_arg = OptoReg::add(_matcher._old_SP,C->out_preserve_stack_slots());
2159 2162 while( reg > begin_in_arg ) {
2160 2163 reg = OptoReg::add(reg, -1);
2161 2164 tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
2162 2165 int j;
2163 2166 for( j = 0; j < argcnt; j++) {
2164 2167 if( _matcher._parm_regs[j].first() == reg ||
2165 2168 _matcher._parm_regs[j].second() == reg ) {
2166 2169 tty->print("parm %d: ",j);
2167 2170 domain->field_at(j + TypeFunc::Parms)->dump();
2168 2171 tty->cr();
2169 2172 break;
2170 2173 }
2171 2174 }
2172 2175 if( j >= argcnt )
2173 2176 tty->print_cr("HOLE, owned by SELF");
2174 2177 }
2175 2178
2176 2179 // Old outgoing preserve area
2177 2180 while( reg > _matcher._old_SP ) {
2178 2181 reg = OptoReg::add(reg, -1);
2179 2182 tty->print_cr("#r%3.3d %s+%2d: old out preserve",reg,fp,reg2offset_unchecked(reg));
2180 2183 }
2181 2184
2182 2185 // Old SP
2183 2186 tty->print_cr("# -- Old %s -- Framesize: %d --",fp,
2184 2187 reg2offset_unchecked(OptoReg::add(_matcher._old_SP,-1)) - reg2offset_unchecked(_matcher._new_SP)+jintSize);
2185 2188
2186 2189 // Preserve area dump
2187 2190 int fixed_slots = C->fixed_slots();
2188 2191 OptoReg::Name begin_in_preserve = OptoReg::add(_matcher._old_SP, -(int)C->in_preserve_stack_slots());
2189 2192 OptoReg::Name return_addr = _matcher.return_addr();
2190 2193
2191 2194 reg = OptoReg::add(reg, -1);
2192 2195 while (OptoReg::is_stack(reg)) {
2193 2196 tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
2194 2197 if (return_addr == reg) {
2195 2198 tty->print_cr("return address");
2196 2199 } else if (reg >= begin_in_preserve) {
2197 2200 // Preserved slots are present on x86
2198 2201 if (return_addr == OptoReg::add(reg, VMRegImpl::slots_per_word))
2199 2202 tty->print_cr("saved fp register");
2200 2203 else if (return_addr == OptoReg::add(reg, 2*VMRegImpl::slots_per_word) &&
2201 2204 VerifyStackAtCalls)
2202 2205 tty->print_cr("0xBADB100D +VerifyStackAtCalls");
2203 2206 else
2204 2207 tty->print_cr("in_preserve");
2205 2208 } else if ((int)OptoReg::reg2stack(reg) < fixed_slots) {
2206 2209 tty->print_cr("Fixed slot %d", OptoReg::reg2stack(reg));
2207 2210 } else {
2208 2211 tty->print_cr("pad2, stack alignment");
2209 2212 }
2210 2213 reg = OptoReg::add(reg, -1);
2211 2214 }
2212 2215
2213 2216 // Spill area dump
2214 2217 reg = OptoReg::add(_matcher._new_SP, _framesize );
2215 2218 while( reg > _matcher._out_arg_limit ) {
2216 2219 reg = OptoReg::add(reg, -1);
2217 2220 tty->print_cr("#r%3.3d %s+%2d: spill",reg,fp,reg2offset_unchecked(reg));
2218 2221 }
2219 2222
2220 2223 // Outgoing argument area dump
2221 2224 while( reg > OptoReg::add(_matcher._new_SP, C->out_preserve_stack_slots()) ) {
2222 2225 reg = OptoReg::add(reg, -1);
2223 2226 tty->print_cr("#r%3.3d %s+%2d: outgoing argument",reg,fp,reg2offset_unchecked(reg));
2224 2227 }
2225 2228
2226 2229 // Outgoing new preserve area
2227 2230 while( reg > _matcher._new_SP ) {
2228 2231 reg = OptoReg::add(reg, -1);
2229 2232 tty->print_cr("#r%3.3d %s+%2d: new out preserve",reg,fp,reg2offset_unchecked(reg));
2230 2233 }
2231 2234 tty->print_cr("#");
2232 2235 }
2233 2236
2234 2237 void PhaseChaitin::dump_bb( uint pre_order ) const {
2235 2238 tty->print_cr("---dump of B%d---",pre_order);
2236 2239 for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
2237 2240 Block* block = _cfg.get_block(i);
2238 2241 if (block->_pre_order == pre_order) {
2239 2242 dump(block);
2240 2243 }
2241 2244 }
2242 2245 }
2243 2246
2244 2247 void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
2245 2248 tty->print_cr("---dump of L%d---",lidx);
2246 2249
2247 2250 if (_ifg) {
2248 2251 if (lidx >= _lrg_map.max_lrg_id()) {
2249 2252 tty->print("Attempt to print live range index beyond max live range.\n");
2250 2253 return;
2251 2254 }
2252 2255 tty->print("L%d: ",lidx);
2253 2256 if (lidx < _ifg->_maxlrg) {
2254 2257 lrgs(lidx).dump();
2255 2258 } else {
2256 2259 tty->print_cr("new LRG");
2257 2260 }
2258 2261 }
2259 2262 if( _ifg && lidx < _ifg->_maxlrg) {
2260 2263 tty->print("Neighbors: %d - ", _ifg->neighbor_cnt(lidx));
2261 2264 _ifg->neighbors(lidx)->dump();
2262 2265 tty->cr();
2263 2266 }
2264 2267 // For all blocks
2265 2268 for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
2266 2269 Block* block = _cfg.get_block(i);
2267 2270 int dump_once = 0;
2268 2271
2269 2272 // For all instructions
2270 2273 for( uint j = 0; j < block->number_of_nodes(); j++ ) {
2271 2274 Node *n = block->get_node(j);
2272 2275 if (_lrg_map.find_const(n) == lidx) {
2273 2276 if (!dump_once++) {
2274 2277 tty->cr();
2275 2278 block->dump_head(&_cfg);
2276 2279 }
2277 2280 dump(n);
2278 2281 continue;
2279 2282 }
2280 2283 if (!defs_only) {
2281 2284 uint cnt = n->req();
2282 2285 for( uint k = 1; k < cnt; k++ ) {
2283 2286 Node *m = n->in(k);
2284 2287 if (!m) {
2285 2288 continue; // be robust in the dumper
2286 2289 }
2287 2290 if (_lrg_map.find_const(m) == lidx) {
2288 2291 if (!dump_once++) {
2289 2292 tty->cr();
2290 2293 block->dump_head(&_cfg);
2291 2294 }
2292 2295 dump(n);
2293 2296 }
2294 2297 }
2295 2298 }
2296 2299 }
2297 2300 } // End of per-block dump
2298 2301 tty->cr();
2299 2302 }
2300 2303 #endif // not PRODUCT
2301 2304
2302 2305 int PhaseChaitin::_final_loads = 0;
2303 2306 int PhaseChaitin::_final_stores = 0;
2304 2307 int PhaseChaitin::_final_memoves= 0;
2305 2308 int PhaseChaitin::_final_copies = 0;
2306 2309 double PhaseChaitin::_final_load_cost = 0;
2307 2310 double PhaseChaitin::_final_store_cost = 0;
2308 2311 double PhaseChaitin::_final_memove_cost= 0;
2309 2312 double PhaseChaitin::_final_copy_cost = 0;
2310 2313 int PhaseChaitin::_conserv_coalesce = 0;
2311 2314 int PhaseChaitin::_conserv_coalesce_pair = 0;
2312 2315 int PhaseChaitin::_conserv_coalesce_trie = 0;
2313 2316 int PhaseChaitin::_conserv_coalesce_quad = 0;
2314 2317 int PhaseChaitin::_post_alloc = 0;
2315 2318 int PhaseChaitin::_lost_opp_pp_coalesce = 0;
2316 2319 int PhaseChaitin::_lost_opp_cflow_coalesce = 0;
2317 2320 int PhaseChaitin::_used_cisc_instructions = 0;
2318 2321 int PhaseChaitin::_unused_cisc_instructions = 0;
2319 2322 int PhaseChaitin::_allocator_attempts = 0;
2320 2323 int PhaseChaitin::_allocator_successes = 0;
2321 2324
2322 2325 #ifndef PRODUCT
2323 2326 uint PhaseChaitin::_high_pressure = 0;
2324 2327 uint PhaseChaitin::_low_pressure = 0;
2325 2328
2326 2329 void PhaseChaitin::print_chaitin_statistics() {
2327 2330 tty->print_cr("Inserted %d spill loads, %d spill stores, %d mem-mem moves and %d copies.", _final_loads, _final_stores, _final_memoves, _final_copies);
2328 2331 tty->print_cr("Total load cost= %6.0f, store cost = %6.0f, mem-mem cost = %5.2f, copy cost = %5.0f.", _final_load_cost, _final_store_cost, _final_memove_cost, _final_copy_cost);
2329 2332 tty->print_cr("Adjusted spill cost = %7.0f.",
2330 2333 _final_load_cost*4.0 + _final_store_cost * 2.0 +
2331 2334 _final_copy_cost*1.0 + _final_memove_cost*12.0);
2332 2335 tty->print("Conservatively coalesced %d copies, %d pairs",
2333 2336 _conserv_coalesce, _conserv_coalesce_pair);
2334 2337 if( _conserv_coalesce_trie || _conserv_coalesce_quad )
2335 2338 tty->print(", %d tries, %d quads", _conserv_coalesce_trie, _conserv_coalesce_quad);
2336 2339 tty->print_cr(", %d post alloc.", _post_alloc);
2337 2340 if( _lost_opp_pp_coalesce || _lost_opp_cflow_coalesce )
2338 2341 tty->print_cr("Lost coalesce opportunity, %d private-private, and %d cflow interfered.",
2339 2342 _lost_opp_pp_coalesce, _lost_opp_cflow_coalesce );
2340 2343 if( _used_cisc_instructions || _unused_cisc_instructions )
2341 2344 tty->print_cr("Used cisc instruction %d, remained in register %d",
2342 2345 _used_cisc_instructions, _unused_cisc_instructions);
2343 2346 if( _allocator_successes != 0 )
2344 2347 tty->print_cr("Average allocation trips %f", (float)_allocator_attempts/(float)_allocator_successes);
2345 2348 tty->print_cr("High Pressure Blocks = %d, Low Pressure Blocks = %d", _high_pressure, _low_pressure);
2346 2349 }
2347 2350 #endif // not PRODUCT