Print this page
rev 6900 : 8048169: Change 8037816 breaks HS build on PPC64 and CPP-Interpreter platforms
Summary: Fix the matching of format string parameter types to the actual argument types for the PPC64 and CPP-Interpreter files in the same way as 8037816 already did it for all the other files
Reviewed-by: stefank, coleenp, dholmes
Split |
Split |
Close |
Expand all |
Collapse all |
--- old/hotspot/src/cpu/ppc/vm/assembler_ppc.cpp
+++ new/hotspot/src/cpu/ppc/vm/assembler_ppc.cpp
1 1 /*
2 - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
2 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
3 3 * Copyright 2012, 2014 SAP AG. All rights reserved.
4 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 5 *
6 6 * This code is free software; you can redistribute it and/or modify it
7 7 * under the terms of the GNU General Public License version 2 only, as
8 8 * published by the Free Software Foundation.
9 9 *
10 10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 13 * version 2 for more details (a copy is included in the LICENSE file that
14 14 * accompanied this code).
15 15 *
16 16 * You should have received a copy of the GNU General Public License version
17 17 * 2 along with this work; if not, write to the Free Software Foundation,
18 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 19 *
20 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 21 * or visit www.oracle.com if you need additional information or have any
22 22 * questions.
23 23 *
24 24 */
25 25
26 26 #include "precompiled.hpp"
27 27 #include "asm/assembler.inline.hpp"
28 28 #include "gc_interface/collectedHeap.inline.hpp"
29 29 #include "interpreter/interpreter.hpp"
30 30 #include "memory/cardTableModRefBS.hpp"
31 31 #include "memory/resourceArea.hpp"
32 32 #include "prims/methodHandles.hpp"
33 33 #include "runtime/biasedLocking.hpp"
34 34 #include "runtime/interfaceSupport.hpp"
35 35 #include "runtime/objectMonitor.hpp"
36 36 #include "runtime/os.hpp"
37 37 #include "runtime/sharedRuntime.hpp"
38 38 #include "runtime/stubRoutines.hpp"
39 39 #include "utilities/macros.hpp"
40 40 #if INCLUDE_ALL_GCS
41 41 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
42 42 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
43 43 #include "gc_implementation/g1/heapRegion.hpp"
44 44 #endif // INCLUDE_ALL_GCS
45 45
46 46 #ifdef PRODUCT
47 47 #define BLOCK_COMMENT(str) // nothing
48 48 #else
49 49 #define BLOCK_COMMENT(str) block_comment(str)
50 50 #endif
51 51
// Byte value the assembler uses to fill alignment gaps in the code buffer.
// A word of 0x00000000 is an illegal instruction on PPC, so accidentally
// executed padding traps rather than doing something unpredictable.
int AbstractAssembler::code_fill_byte() {
  return 0x00; // illegal instruction 0x00000000
}
55 55
// Debug helper: print a disassembly of the single instruction word `inst'.
// Not implemented for PPC; Unimplemented() reports the missing code path.
void Assembler::print_instruction(int inst) {
  Unimplemented();
}
59 59
60 60 // Patch instruction `inst' at offset `inst_pos' to refer to
61 61 // `dest_pos' and return the resulting instruction. We should have
62 62 // pcs, not offsets, but since all is relative, it will work out fine.
63 63 int Assembler::patched_branch(int dest_pos, int inst, int inst_pos) {
64 64 int m = 0; // mask for displacement field
65 65 int v = 0; // new value for displacement field
66 66
67 67 switch (inv_op_ppc(inst)) {
68 68 case b_op: m = li(-1); v = li(disp(dest_pos, inst_pos)); break;
69 69 case bc_op: m = bd(-1); v = bd(disp(dest_pos, inst_pos)); break;
70 70 default: ShouldNotReachHere();
71 71 }
72 72 return inst & ~m | v;
73 73 }
74 74
75 75 // Return the offset, relative to _code_begin, of the destination of
76 76 // the branch inst at offset pos.
77 77 int Assembler::branch_destination(int inst, int pos) {
78 78 int r = 0;
79 79 switch (inv_op_ppc(inst)) {
80 80 case b_op: r = bxx_destination_offset(inst, pos); break;
81 81 case bc_op: r = inv_bd_field(inst, pos); break;
82 82 default: ShouldNotReachHere();
83 83 }
84 84 return r;
85 85 }
86 86
// Low-level andi-one-instruction-macro.
// Emits exactly one instruction that ands `s' with the 16-bit unsigned
// immediate `ui16' into `a'.  Immediates with a special bit shape are
// encoded with a rotate-and-mask instruction instead of the generic
// andi_ record form (NOTE(review): presumably to avoid clobbering CR0,
// which andi_ sets — confirm against the emitter definitions).
void Assembler::andi(Register a, Register s, const int ui16) {
  assert(is_uimm(ui16, 16), "must be 16-bit unsigned immediate");
  if (is_power_of_2_long(((jlong) ui16)+1)) {
    // pow2minus1: mask of low-order ones -> clear the high-order bits.
    clrldi(a, s, 64-log2_long((((jlong) ui16)+1)));
  } else if (is_power_of_2_long((jlong) ui16)) {
    // pow2: a single set bit -> extract just that bit.
    rlwinm(a, s, 0, 31-log2_long((jlong) ui16), 31-log2_long((jlong) ui16));
  } else if (is_power_of_2_long((jlong)-ui16)) {
    // negpow2: mask of high-order ones -> clear the low-order bits.
    clrrdi(a, s, log2_long((jlong)-ui16));
  } else {
    // General case: record-form andi.
    andi_(a, s, ui16);
  }
}
103 103
104 104 // RegisterOrConstant version.
105 105 void Assembler::ld(Register d, RegisterOrConstant roc, Register s1) {
106 106 if (roc.is_constant()) {
107 107 if (s1 == noreg) {
108 108 int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
109 109 Assembler::ld(d, simm16_rest, d);
110 110 } else if (is_simm(roc.as_constant(), 16)) {
111 111 Assembler::ld(d, roc.as_constant(), s1);
112 112 } else {
113 113 load_const_optimized(d, roc.as_constant());
114 114 Assembler::ldx(d, d, s1);
115 115 }
116 116 } else {
117 117 if (s1 == noreg)
118 118 Assembler::ld(d, 0, roc.as_register());
119 119 else
120 120 Assembler::ldx(d, roc.as_register(), s1);
121 121 }
122 122 }
123 123
124 124 void Assembler::lwa(Register d, RegisterOrConstant roc, Register s1) {
125 125 if (roc.is_constant()) {
126 126 if (s1 == noreg) {
127 127 int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
128 128 Assembler::lwa(d, simm16_rest, d);
129 129 } else if (is_simm(roc.as_constant(), 16)) {
130 130 Assembler::lwa(d, roc.as_constant(), s1);
131 131 } else {
132 132 load_const_optimized(d, roc.as_constant());
133 133 Assembler::lwax(d, d, s1);
134 134 }
135 135 } else {
136 136 if (s1 == noreg)
137 137 Assembler::lwa(d, 0, roc.as_register());
138 138 else
139 139 Assembler::lwax(d, roc.as_register(), s1);
140 140 }
141 141 }
142 142
143 143 void Assembler::lwz(Register d, RegisterOrConstant roc, Register s1) {
144 144 if (roc.is_constant()) {
145 145 if (s1 == noreg) {
146 146 int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
147 147 Assembler::lwz(d, simm16_rest, d);
148 148 } else if (is_simm(roc.as_constant(), 16)) {
149 149 Assembler::lwz(d, roc.as_constant(), s1);
150 150 } else {
151 151 load_const_optimized(d, roc.as_constant());
152 152 Assembler::lwzx(d, d, s1);
153 153 }
154 154 } else {
155 155 if (s1 == noreg)
156 156 Assembler::lwz(d, 0, roc.as_register());
157 157 else
158 158 Assembler::lwzx(d, roc.as_register(), s1);
159 159 }
160 160 }
161 161
162 162 void Assembler::lha(Register d, RegisterOrConstant roc, Register s1) {
163 163 if (roc.is_constant()) {
164 164 if (s1 == noreg) {
165 165 int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
166 166 Assembler::lha(d, simm16_rest, d);
167 167 } else if (is_simm(roc.as_constant(), 16)) {
168 168 Assembler::lha(d, roc.as_constant(), s1);
169 169 } else {
170 170 load_const_optimized(d, roc.as_constant());
171 171 Assembler::lhax(d, d, s1);
172 172 }
173 173 } else {
174 174 if (s1 == noreg)
175 175 Assembler::lha(d, 0, roc.as_register());
176 176 else
177 177 Assembler::lhax(d, roc.as_register(), s1);
178 178 }
179 179 }
180 180
181 181 void Assembler::lhz(Register d, RegisterOrConstant roc, Register s1) {
182 182 if (roc.is_constant()) {
183 183 if (s1 == noreg) {
184 184 int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
185 185 Assembler::lhz(d, simm16_rest, d);
186 186 } else if (is_simm(roc.as_constant(), 16)) {
187 187 Assembler::lhz(d, roc.as_constant(), s1);
188 188 } else {
189 189 load_const_optimized(d, roc.as_constant());
190 190 Assembler::lhzx(d, d, s1);
191 191 }
192 192 } else {
193 193 if (s1 == noreg)
194 194 Assembler::lhz(d, 0, roc.as_register());
195 195 else
196 196 Assembler::lhzx(d, roc.as_register(), s1);
197 197 }
198 198 }
199 199
200 200 void Assembler::lbz(Register d, RegisterOrConstant roc, Register s1) {
201 201 if (roc.is_constant()) {
202 202 if (s1 == noreg) {
203 203 int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
204 204 Assembler::lbz(d, simm16_rest, d);
205 205 } else if (is_simm(roc.as_constant(), 16)) {
206 206 Assembler::lbz(d, roc.as_constant(), s1);
207 207 } else {
208 208 load_const_optimized(d, roc.as_constant());
209 209 Assembler::lbzx(d, d, s1);
210 210 }
211 211 } else {
212 212 if (s1 == noreg)
213 213 Assembler::lbz(d, 0, roc.as_register());
214 214 else
215 215 Assembler::lbzx(d, roc.as_register(), s1);
216 216 }
217 217 }
218 218
219 219 void Assembler::std(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
220 220 if (roc.is_constant()) {
221 221 if (s1 == noreg) {
222 222 guarantee(tmp != noreg, "Need tmp reg to encode large constants");
223 223 int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
224 224 Assembler::std(d, simm16_rest, tmp);
225 225 } else if (is_simm(roc.as_constant(), 16)) {
226 226 Assembler::std(d, roc.as_constant(), s1);
227 227 } else {
228 228 guarantee(tmp != noreg, "Need tmp reg to encode large constants");
229 229 load_const_optimized(tmp, roc.as_constant());
230 230 Assembler::stdx(d, tmp, s1);
231 231 }
232 232 } else {
233 233 if (s1 == noreg)
234 234 Assembler::std(d, 0, roc.as_register());
235 235 else
236 236 Assembler::stdx(d, roc.as_register(), s1);
237 237 }
238 238 }
239 239
240 240 void Assembler::stw(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
241 241 if (roc.is_constant()) {
242 242 if (s1 == noreg) {
243 243 guarantee(tmp != noreg, "Need tmp reg to encode large constants");
244 244 int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
245 245 Assembler::stw(d, simm16_rest, tmp);
246 246 } else if (is_simm(roc.as_constant(), 16)) {
247 247 Assembler::stw(d, roc.as_constant(), s1);
248 248 } else {
249 249 guarantee(tmp != noreg, "Need tmp reg to encode large constants");
250 250 load_const_optimized(tmp, roc.as_constant());
251 251 Assembler::stwx(d, tmp, s1);
252 252 }
253 253 } else {
254 254 if (s1 == noreg)
255 255 Assembler::stw(d, 0, roc.as_register());
256 256 else
257 257 Assembler::stwx(d, roc.as_register(), s1);
258 258 }
259 259 }
260 260
261 261 void Assembler::sth(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
262 262 if (roc.is_constant()) {
263 263 if (s1 == noreg) {
264 264 guarantee(tmp != noreg, "Need tmp reg to encode large constants");
265 265 int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
266 266 Assembler::sth(d, simm16_rest, tmp);
267 267 } else if (is_simm(roc.as_constant(), 16)) {
268 268 Assembler::sth(d, roc.as_constant(), s1);
269 269 } else {
270 270 guarantee(tmp != noreg, "Need tmp reg to encode large constants");
271 271 load_const_optimized(tmp, roc.as_constant());
272 272 Assembler::sthx(d, tmp, s1);
273 273 }
274 274 } else {
275 275 if (s1 == noreg)
276 276 Assembler::sth(d, 0, roc.as_register());
277 277 else
278 278 Assembler::sthx(d, roc.as_register(), s1);
279 279 }
280 280 }
281 281
282 282 void Assembler::stb(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
283 283 if (roc.is_constant()) {
284 284 if (s1 == noreg) {
285 285 guarantee(tmp != noreg, "Need tmp reg to encode large constants");
286 286 int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
287 287 Assembler::stb(d, simm16_rest, tmp);
288 288 } else if (is_simm(roc.as_constant(), 16)) {
289 289 Assembler::stb(d, roc.as_constant(), s1);
290 290 } else {
291 291 guarantee(tmp != noreg, "Need tmp reg to encode large constants");
292 292 load_const_optimized(tmp, roc.as_constant());
293 293 Assembler::stbx(d, tmp, s1);
294 294 }
295 295 } else {
296 296 if (s1 == noreg)
297 297 Assembler::stb(d, 0, roc.as_register());
298 298 else
299 299 Assembler::stbx(d, roc.as_register(), s1);
300 300 }
301 301 }
302 302
303 303 void Assembler::add(Register d, RegisterOrConstant roc, Register s1) {
304 304 if (roc.is_constant()) {
305 305 intptr_t c = roc.as_constant();
306 306 assert(is_simm(c, 16), "too big");
307 307 addi(d, s1, (int)c);
308 308 }
309 309 else add(d, roc.as_register(), s1);
310 310 }
311 311
312 312 void Assembler::subf(Register d, RegisterOrConstant roc, Register s1) {
313 313 if (roc.is_constant()) {
314 314 intptr_t c = roc.as_constant();
315 315 assert(is_simm(-c, 16), "too big");
316 316 addi(d, s1, (int)-c);
317 317 }
318 318 else subf(d, roc.as_register(), s1);
319 319 }
320 320
321 321 void Assembler::cmpd(ConditionRegister d, RegisterOrConstant roc, Register s1) {
322 322 if (roc.is_constant()) {
323 323 intptr_t c = roc.as_constant();
324 324 assert(is_simm(c, 16), "too big");
325 325 cmpdi(d, s1, (int)c);
326 326 }
327 327 else cmpd(d, roc.as_register(), s1);
328 328 }
329 329
// Load a 64 bit constant. Patchable.
// Always emits the same five-instruction sequence regardless of the value
// of x, so the constant can later be patched in place.  Do NOT shorten or
// reorder this sequence.  With a tmp register the two 32-bit halves are
// built independently and merged with insrdi for better ILP.
void Assembler::load_const(Register d, long x, Register tmp) {
  // 64-bit value: x = xa xb xc xd  (four 16-bit chunks, high to low)
  int xa = (x >> 48) & 0xffff;
  int xb = (x >> 32) & 0xffff;
  int xc = (x >> 16) & 0xffff;
  int xd = (x >>  0) & 0xffff;
  if (tmp == noreg) {
    // Serial form: build the high half, shift it up, then OR in the low half.
    Assembler::lis( d, (int)(short)xa);
    Assembler::ori( d, d, (unsigned int)xb);
    Assembler::sldi(d, d, 32);
    Assembler::oris(d, d, (unsigned int)xc);
    Assembler::ori( d, d, (unsigned int)xd);
  } else {
    // exploit instruction level parallelism if we have a tmp register
    assert_different_registers(d, tmp);
    Assembler::lis(tmp, (int)(short)xa);
    Assembler::lis(d, (int)(short)xc);
    Assembler::ori(tmp, tmp, (unsigned int)xb);
    Assembler::ori(d, d, (unsigned int)xd);
    Assembler::insrdi(d, tmp, 32, 0); // Insert high half (tmp) into bits 0..31 of d.
  }
}
353 353
// Load a 64 bit constant, optimized, not identifyable.
// Tmp can be used to increase ILP. Set return_simm16_rest=true to get a
// 16 bit immediate offset.
//
// Emits the shortest sequence for the given value (1..5 instructions).
// The returned value is 0, unless return_simm16_rest is set, in which
// case the low 16-bit chunk may be left out of the materialized constant
// and returned instead, for the caller to fold into a following
// instruction's displacement field.  Because the emitted sequence depends
// on the value, the result is NOT patchable (use load_const for that).
int Assembler::load_const_optimized(Register d, long x, Register tmp, bool return_simm16_rest) {
  // Avoid accidentally trying to use R0 for indexed addressing.
  assert(d != R0, "R0 not allowed");
  assert_different_registers(d, tmp);

  short xa, xb, xc, xd; // Four 16-bit chunks of const.
  long rem = x;         // Remaining part of const.

  xd = rem & 0xFFFF; // Lowest 16-bit chunk.
  rem = (rem >> 16) + ((unsigned short)xd >> 15); // Compensation for sign extend.

  if (rem == 0) { // opt 1: simm16
    li(d, xd);
    return 0;
  }

  xc = rem & 0xFFFF; // Next 16-bit chunk.
  rem = (rem >> 16) + ((unsigned short)xc >> 15); // Compensation for sign extend.

  if (rem == 0) { // opt 2: simm32
    lis(d, xc);
  } else { // High 32 bits needed.

    if (tmp != noreg) { // opt 3: We have a temp reg.
      // No carry propagation between xc and higher chunks here (use logical instructions).
      xa = (x >> 48) & 0xffff;
      xb = (x >> 32) & 0xffff; // No sign compensation, we use lis+ori or li to allow usage of R0.
      bool load_xa = (xa != 0) || (xb < 0);
      bool return_xd = false;

      // Build high half in tmp and low half in d in parallel, then merge.
      if (load_xa) { lis(tmp, xa); }
      if (xc) { lis(d, xc); }
      if (load_xa) {
        if (xb) { ori(tmp, tmp, (unsigned short)xb); } // No addi, we support tmp == R0.
      } else {
        li(tmp, xb); // non-negative
      }
      if (xc) {
        if (return_simm16_rest && xd >= 0) { return_xd = true; } // >= 0 to avoid carry propagation after insrdi/rldimi.
        else if (xd) { addi(d, d, xd); }
      } else {
        li(d, xd);
      }
      insrdi(d, tmp, 32, 0); // Merge high half into bits 0..31 of d.
      return return_xd ? xd : 0; // non-negative
    }

    // No temp reg: build serially from the highest chunk down.
    xb = rem & 0xFFFF; // Next 16-bit chunk.
    rem = (rem >> 16) + ((unsigned short)xb >> 15); // Compensation for sign extend.

    xa = rem & 0xFFFF; // Highest 16-bit chunk.

    // opt 4: avoid adding 0
    if (xa) { // Highest 16-bit needed?
      lis(d, xa);
      if (xb) { addi(d, d, xb); }
    } else {
      li(d, xb);
    }
    sldi(d, d, 32);
    if (xc) { addis(d, d, xc); }
  }

  // opt 5: Return offset to be inserted into following instruction.
  if (return_simm16_rest) return xd;

  if (xd) { addi(d, d, xd); }
  return 0;
}
426 426
427 427 #ifndef PRODUCT
428 428 // Test of ppc assembler.
429 429 void Assembler::test_asm() {
430 430 // PPC 1, section 3.3.8, Fixed-Point Arithmetic Instructions
431 431 addi( R0, R1, 10);
432 432 addis( R5, R2, 11);
433 433 addic_( R3, R31, 42);
434 434 subfic( R21, R12, 2112);
435 435 add( R3, R2, R1);
436 436 add_( R11, R22, R30);
437 437 subf( R7, R6, R5);
438 438 subf_( R8, R9, R4);
439 439 addc( R11, R12, R13);
440 440 addc_( R14, R14, R14);
441 441 subfc( R15, R16, R17);
442 442 subfc_( R18, R20, R19);
443 443 adde( R20, R22, R24);
444 444 adde_( R29, R27, R26);
445 445 subfe( R28, R1, R0);
446 446 subfe_( R21, R11, R29);
447 447 neg( R21, R22);
448 448 neg_( R13, R23);
449 449 mulli( R0, R11, -31);
450 450 mulld( R1, R18, R21);
451 451 mulld_( R2, R17, R22);
452 452 mullw( R3, R16, R23);
453 453 mullw_( R4, R15, R24);
454 454 divd( R5, R14, R25);
455 455 divd_( R6, R13, R26);
456 456 divw( R7, R12, R27);
457 457 divw_( R8, R11, R28);
458 458
459 459 li( R3, -4711);
460 460
461 461 // PPC 1, section 3.3.9, Fixed-Point Compare Instructions
462 462 cmpi( CCR7, 0, R27, 4711);
463 463 cmp( CCR0, 1, R14, R11);
464 464 cmpli( CCR5, 1, R17, 45);
465 465 cmpl( CCR3, 0, R9, R10);
466 466
467 467 cmpwi( CCR7, R27, 4711);
468 468 cmpw( CCR0, R14, R11);
469 469 cmplwi( CCR5, R17, 45);
470 470 cmplw( CCR3, R9, R10);
471 471
472 472 cmpdi( CCR7, R27, 4711);
473 473 cmpd( CCR0, R14, R11);
474 474 cmpldi( CCR5, R17, 45);
475 475 cmpld( CCR3, R9, R10);
476 476
477 477 // PPC 1, section 3.3.11, Fixed-Point Logical Instructions
478 478 andi_( R4, R5, 0xff);
479 479 andis_( R12, R13, 0x7b51);
480 480 ori( R1, R4, 13);
481 481 oris( R3, R5, 177);
482 482 xori( R7, R6, 51);
483 483 xoris( R29, R0, 1);
484 484 andr( R17, R21, R16);
485 485 and_( R3, R5, R15);
486 486 orr( R2, R1, R9);
487 487 or_( R17, R15, R11);
488 488 xorr( R19, R18, R10);
489 489 xor_( R31, R21, R11);
490 490 nand( R5, R7, R3);
491 491 nand_( R3, R1, R0);
492 492 nor( R2, R3, R5);
493 493 nor_( R3, R6, R8);
494 494 andc( R25, R12, R11);
495 495 andc_( R24, R22, R21);
496 496 orc( R20, R10, R12);
497 497 orc_( R22, R2, R13);
498 498
499 499 nop();
500 500
501 501 // PPC 1, section 3.3.12, Fixed-Point Rotate and Shift Instructions
502 502 sld( R5, R6, R8);
503 503 sld_( R3, R5, R9);
504 504 slw( R2, R1, R10);
505 505 slw_( R6, R26, R16);
506 506 srd( R16, R24, R8);
507 507 srd_( R21, R14, R7);
508 508 srw( R22, R25, R29);
509 509 srw_( R5, R18, R17);
510 510 srad( R7, R11, R0);
511 511 srad_( R9, R13, R1);
512 512 sraw( R7, R15, R2);
513 513 sraw_( R4, R17, R3);
514 514 sldi( R3, R18, 63);
515 515 sldi_( R2, R20, 30);
516 516 slwi( R1, R21, 30);
517 517 slwi_( R7, R23, 8);
518 518 srdi( R0, R19, 2);
519 519 srdi_( R12, R24, 5);
520 520 srwi( R13, R27, 6);
521 521 srwi_( R14, R29, 7);
522 522 sradi( R15, R30, 9);
523 523 sradi_( R16, R31, 19);
524 524 srawi( R17, R31, 15);
525 525 srawi_( R18, R31, 12);
526 526
527 527 clrrdi( R3, R30, 5);
528 528 clrldi( R9, R10, 11);
529 529
530 530 rldicr( R19, R20, 13, 15);
531 531 rldicr_(R20, R20, 16, 14);
532 532 rldicl( R21, R21, 30, 33);
533 533 rldicl_(R22, R1, 20, 25);
534 534 rlwinm( R23, R2, 25, 10, 11);
535 535 rlwinm_(R24, R3, 12, 13, 14);
536 536
537 537 // PPC 1, section 3.3.2 Fixed-Point Load Instructions
538 538 lwzx( R3, R5, R7);
539 539 lwz( R11, 0, R1);
540 540 lwzu( R31, -4, R11);
541 541
542 542 lwax( R3, R5, R7);
543 543 lwa( R31, -4, R11);
544 544 lhzx( R3, R5, R7);
545 545 lhz( R31, -4, R11);
546 546 lhzu( R31, -4, R11);
547 547
548 548
549 549 lhax( R3, R5, R7);
550 550 lha( R31, -4, R11);
551 551 lhau( R11, 0, R1);
552 552
553 553 lbzx( R3, R5, R7);
554 554 lbz( R31, -4, R11);
555 555 lbzu( R11, 0, R1);
556 556
557 557 ld( R31, -4, R11);
558 558 ldx( R3, R5, R7);
559 559 ldu( R31, -4, R11);
560 560
561 561 // PPC 1, section 3.3.3 Fixed-Point Store Instructions
562 562 stwx( R3, R5, R7);
563 563 stw( R31, -4, R11);
564 564 stwu( R11, 0, R1);
565 565
566 566 sthx( R3, R5, R7 );
567 567 sth( R31, -4, R11);
568 568 sthu( R31, -4, R11);
569 569
570 570 stbx( R3, R5, R7);
571 571 stb( R31, -4, R11);
572 572 stbu( R31, -4, R11);
573 573
574 574 std( R31, -4, R11);
575 575 stdx( R3, R5, R7);
576 576 stdu( R31, -4, R11);
577 577
578 578 // PPC 1, section 3.3.13 Move To/From System Register Instructions
579 579 mtlr( R3);
580 580 mflr( R3);
581 581 mtctr( R3);
582 582 mfctr( R3);
583 583 mtcrf( 0xff, R15);
584 584 mtcr( R15);
585 585 mtcrf( 0x03, R15);
586 586 mtcr( R15);
587 587 mfcr( R15);
588 588
589 589 // PPC 1, section 2.4.1 Branch Instructions
590 590 Label lbl1, lbl2, lbl3;
591 591 bind(lbl1);
592 592
593 593 b(pc());
594 594 b(pc() - 8);
595 595 b(lbl1);
596 596 b(lbl2);
597 597 b(lbl3);
598 598
599 599 bl(pc() - 8);
600 600 bl(lbl1);
601 601 bl(lbl2);
602 602
603 603 bcl(4, 10, pc() - 8);
604 604 bcl(4, 10, lbl1);
605 605 bcl(4, 10, lbl2);
606 606
607 607 bclr( 4, 6, 0);
608 608 bclrl(4, 6, 0);
609 609
610 610 bind(lbl2);
611 611
612 612 bcctr( 4, 6, 0);
613 613 bcctrl(4, 6, 0);
614 614
615 615 blt(CCR0, lbl2);
616 616 bgt(CCR1, lbl2);
617 617 beq(CCR2, lbl2);
618 618 bso(CCR3, lbl2);
619 619 bge(CCR4, lbl2);
620 620 ble(CCR5, lbl2);
621 621 bne(CCR6, lbl2);
622 622 bns(CCR7, lbl2);
623 623
624 624 bltl(CCR0, lbl2);
625 625 bgtl(CCR1, lbl2);
626 626 beql(CCR2, lbl2);
627 627 bsol(CCR3, lbl2);
628 628 bgel(CCR4, lbl2);
629 629 blel(CCR5, lbl2);
630 630 bnel(CCR6, lbl2);
631 631 bnsl(CCR7, lbl2);
632 632 blr();
633 633
634 634 sync();
635 635 icbi( R1, R2);
636 636 dcbst(R2, R3);
637 637
638 638 // FLOATING POINT instructions ppc.
639 639 // PPC 1, section 4.6.2 Floating-Point Load Instructions
640 640 lfs( F1, -11, R3);
641 641 lfsu(F2, 123, R4);
642 642 lfsx(F3, R5, R6);
643 643 lfd( F4, 456, R7);
644 644 lfdu(F5, 789, R8);
645 645 lfdx(F6, R10, R11);
646 646
647 647 // PPC 1, section 4.6.3 Floating-Point Store Instructions
648 648 stfs( F7, 876, R12);
649 649 stfsu( F8, 543, R13);
650 650 stfsx( F9, R14, R15);
651 651 stfd( F10, 210, R16);
652 652 stfdu( F11, 111, R17);
653 653 stfdx( F12, R18, R19);
654 654
655 655 // PPC 1, section 4.6.4 Floating-Point Move Instructions
656 656 fmr( F13, F14);
657 657 fmr_( F14, F15);
658 658 fneg( F16, F17);
659 659 fneg_( F18, F19);
660 660 fabs( F20, F21);
661 661 fabs_( F22, F23);
662 662 fnabs( F24, F25);
663 663 fnabs_(F26, F27);
664 664
665 665 // PPC 1, section 4.6.5.1 Floating-Point Elementary Arithmetic
666 666 // Instructions
667 667 fadd( F28, F29, F30);
668 668 fadd_( F31, F0, F1);
669 669 fadds( F2, F3, F4);
670 670 fadds_(F5, F6, F7);
671 671 fsub( F8, F9, F10);
672 672 fsub_( F11, F12, F13);
673 673 fsubs( F14, F15, F16);
674 674 fsubs_(F17, F18, F19);
675 675 fmul( F20, F21, F22);
676 676 fmul_( F23, F24, F25);
677 677 fmuls( F26, F27, F28);
678 678 fmuls_(F29, F30, F31);
679 679 fdiv( F0, F1, F2);
680 680 fdiv_( F3, F4, F5);
681 681 fdivs( F6, F7, F8);
682 682 fdivs_(F9, F10, F11);
683 683
684 684 // PPC 1, section 4.6.6 Floating-Point Rounding and Conversion
685 685 // Instructions
↓ open down ↓ |
673 lines elided |
↑ open up ↑ |
686 686 frsp( F12, F13);
687 687 fctid( F14, F15);
688 688 fctidz(F16, F17);
689 689 fctiw( F18, F19);
690 690 fctiwz(F20, F21);
691 691 fcfid( F22, F23);
692 692
693 693 // PPC 1, section 4.6.7 Floating-Point Compare Instructions
694 694 fcmpu( CCR7, F24, F25);
695 695
696 - tty->print_cr("\ntest_asm disassembly (0x%lx 0x%lx):", code()->insts_begin(), code()->insts_end());
696 + tty->print_cr("\ntest_asm disassembly (0x%lx 0x%lx):", p2i(code()->insts_begin()), p2i(code()->insts_end()));
697 697 code()->decode();
698 698 }
699 699
700 700 #endif // !PRODUCT
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX