--- old/src/share/vm/opto/parse3.cpp
+++ new/src/share/vm/opto/parse3.cpp
1 1 /*
2 2 * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "compiler/compileLog.hpp"
27 27 #include "interpreter/linkResolver.hpp"
28 28 #include "memory/universe.inline.hpp"
29 29 #include "oops/objArrayKlass.hpp"
30 30 #include "opto/addnode.hpp"
31 31 #include "opto/memnode.hpp"
32 32 #include "opto/parse.hpp"
33 33 #include "opto/rootnode.hpp"
34 34 #include "opto/runtime.hpp"
35 35 #include "opto/subnode.hpp"
36 36 #include "runtime/deoptimization.hpp"
37 37 #include "runtime/handles.inline.hpp"
38 38
39 39 //=============================================================================
40 40 // Helper methods for _get* and _put* bytecodes
41 41 //=============================================================================
42 42 bool Parse::static_field_ok_in_clinit(ciField *field, ciMethod *method) {
43 43 // Could be the field_holder's <clinit> method, or <clinit> for a subklass.
44 44 // Better to check now than to deoptimize as soon as we execute.
45 45 assert( field->is_static(), "Only check if field is static");
46 46 // is_being_initialized() is too generous. It allows access to statics
47 47 // by threads that are not running the <clinit> before the <clinit> finishes.
48 48 // return field->holder()->is_being_initialized();
49 49
50 50 // The following restriction is correct but conservative.
51 51 // It is also desirable to allow compilation of methods called from <clinit>
52 52 // but this generated code will need to be made safe for execution by
53 53 // other threads, or the transition from interpreted to compiled code would
54 54 // need to be guarded.
55 55 ciInstanceKlass *field_holder = field->holder();
56 56
57 57 bool access_OK = false;
58 58 if (method->holder()->is_subclass_of(field_holder)) {
59 59 if (method->is_static()) {
60 60 if (method->name() == ciSymbol::class_initializer_name()) {
61 61 // OK to access static fields inside initializer
62 62 access_OK = true;
63 63 }
64 64 } else {
65 65 if (method->name() == ciSymbol::object_initializer_name()) {
66 66 // It's also OK to access static fields inside a constructor,
67 67 // because any thread calling the constructor must first have
68 68 // synchronized on the class by executing a '_new' bytecode.
69 69 access_OK = true;
70 70 }
71 71 }
72 72 }
73 73
74 74 return access_OK;
75 75
76 76 }
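
Reviewer note: the nested conditionals above collapse to a single predicate. A hedged restatement, using only the ciMethod/ciField accessors already visible in this file (a sketch, not a proposed change):

    // Access is OK iff the compiled method is the <clinit> (static case) or an
    // <init> (instance case) of the field holder or one of its subclasses.
    bool access_OK =
        method->holder()->is_subclass_of(field->holder()) &&
        (method->is_static()
           ? method->name() == ciSymbol::class_initializer_name()
           : method->name() == ciSymbol::object_initializer_name());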
77 77
78 78
79 79 void Parse::do_field_access(bool is_get, bool is_field) {
80 80 bool will_link;
81 81 ciField* field = iter().get_field(will_link);
82 82 assert(will_link, "getfield: typeflow responsibility");
83 83
84 84 ciInstanceKlass* field_holder = field->holder();
85 85
86 86 if (is_field == field->is_static()) {
87 87 // Interpreter will throw java_lang_IncompatibleClassChangeError
88 88 // Check this before allowing <clinit> methods to access static fields
89 89 uncommon_trap(Deoptimization::Reason_unhandled,
90 90 Deoptimization::Action_none);
91 91 return;
92 92 }
93 93
94 94 if (!is_field && !field_holder->is_initialized()) {
95 95 if (!static_field_ok_in_clinit(field, method())) {
96 96 uncommon_trap(Deoptimization::Reason_uninitialized,
97 97 Deoptimization::Action_reinterpret,
98 98 NULL, "!static_field_ok_in_clinit");
99 99 return;
100 100 }
101 101 }
102 102
103 103 // Deoptimize on putfield writes to CallSite.target
104 104 if (!is_get && field->is_call_site_target()) {
105 105 uncommon_trap(Deoptimization::Reason_unhandled,
106 106 Deoptimization::Action_reinterpret,
107 107 NULL, "put to CallSite.target field");
108 108 return;
109 109 }
110 110
111 111 assert(field->will_link(method()->holder(), bc()), "getfield: typeflow responsibility");
112 112
113 113 // Note: We do not check for an unloaded field type here any more.
114 114
115 115 // Generate code for the object pointer.
116 116 Node* obj;
117 117 if (is_field) {
118 118 int obj_depth = is_get ? 0 : field->type()->size();
119 119 obj = do_null_check(peek(obj_depth), T_OBJECT);
120 120 // Compile-time detection of a null exception?
121 121 if (stopped()) return;
122 122
123 123 #ifdef ASSERT
124 124 const TypeInstPtr *tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
125 125 assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");
126 126 #endif
127 127
128 128 if (is_get) {
129 129 --_sp; // pop receiver before getting
130 130 do_get_xxx(obj, field, is_field);
131 131 } else {
132 132 do_put_xxx(obj, field, is_field);
133 133 --_sp; // pop receiver after putting
134 134 }
135 135 } else {
136 136 const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
137 137 obj = _gvn.makecon(tip);
138 138 if (is_get) {
139 139 do_get_xxx(obj, field, is_field);
140 140 } else {
141 141 do_put_xxx(obj, field, is_field);
142 142 }
143 143 }
144 144 }
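
Reviewer note: the receiver-depth arithmetic in do_field_access() is easiest to check against a concrete bytecode; a hedged worked example:

    // For `putfield` of a `long` field the operand stack is
    //   ..., objectref, value        (the long value occupies 2 slots)
    // so field->type()->size() == 2 and peek(2) reaches the receiver.
    // For `getfield` the receiver itself is on top, hence peek(0).
    int obj_depth = is_get ? 0 : field->type()->size();  // as in the code above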
145 145
146 146
147 147 void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
148 148 // Does this field have a constant value? If so, just push the value.
149 149 if (field->is_constant()) {
150 + // final field
150 151 if (field->is_static()) {
151 152 // final static field
152 153 if (push_constant(field->constant_value()))
153 154 return;
154 155 }
155 156 else {
156 - // final non-static field of a trusted class (classes in
157 - // java.lang.invoke and sun.invoke packages and subpackages).
157 + // final non-static field
158 + // Treat final non-static fields of trusted classes (classes in
159 + // java.lang.invoke and sun.invoke packages and subpackages) as
160 + // compile time constants.
158 161 if (obj->is_Con()) {
159 162 const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr();
160 163 ciObject* constant_oop = oop_ptr->const_oop();
161 164 ciConstant constant = field->constant_value_of(constant_oop);
162 -
163 165 if (push_constant(constant, true))
164 166 return;
165 167 }
166 168 }
167 169 }
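 // Reviewer sketch of the two fold paths above (hedged restatement):
 //   static final:    push_constant(field->constant_value())
 //   final instance:  only when the receiver is itself a constant oop of a
 //                    trusted class, e.g. a getfield on a java.lang.invoke
 //                    MethodHandle that is already a compile-time constant;
 //                    then field->constant_value_of(constant_oop) folds the
 //                    load via push_constant(constant, true).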
168 170
169 171 ciType* field_klass = field->type();
170 172 bool is_vol = field->is_volatile();
171 173
172 174 // Compute address and memory type.
173 175 int offset = field->offset_in_bytes();
174 176 const TypePtr* adr_type = C->alias_type(field)->adr_type();
175 177 Node *adr = basic_plus_adr(obj, obj, offset);
176 178 BasicType bt = field->layout_type();
177 179
178 180 // Build the resultant type of the load
179 181 const Type *type;
180 182
181 183 bool must_assert_null = false;
182 184
183 185 if( bt == T_OBJECT ) {
184 186 if (!field->type()->is_loaded()) {
185 187 type = TypeInstPtr::BOTTOM;
186 188 must_assert_null = true;
187 189 } else if (field->is_constant() && field->is_static()) {
188 190 // This can happen if the constant oop is non-perm.
189 191 ciObject* con = field->constant_value().as_object();
190 192 // Do not "join" in the previous type; it doesn't add value,
191 193 // and may yield a vacuous result if the field is of interface type.
192 194 type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
193 195 assert(type != NULL, "field singleton type must be consistent");
194 196 } else {
195 197 type = TypeOopPtr::make_from_klass(field_klass->as_klass());
196 198 }
197 199 } else {
198 200 type = Type::get_const_basic_type(bt);
199 201 }
200 202 // Build the load.
201 203 Node* ld = make_load(NULL, adr, type, bt, adr_type, is_vol);
202 204
203 205 // Adjust Java stack
204 206 if (type2size[bt] == 1)
205 207 push(ld);
206 208 else
207 209 push_pair(ld);
208 210
209 211 if (must_assert_null) {
210 212 // Do not take a trap here. It's possible that the program
211 213 // will never load the field's class, and will happily see
212 214 // null values in this field forever. Don't stumble into a
213 215 // trap for such a program, or we might get a long series
214 216 // of useless recompilations. (Or, we might load a class
215 217 // which should not be loaded.) If we ever see a non-null
216 218 // value, we will then trap and recompile. (The trap will
217 219 // not need to mention the class index, since the class will
218 220 // already have been loaded if we ever see a non-null value.)
219 221 // uncommon_trap(iter().get_field_signature_index());
220 222 #ifndef PRODUCT
221 223 if (PrintOpto && (Verbose || WizardMode)) {
222 224 method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
223 225 }
224 226 #endif
225 227 if (C->log() != NULL) {
226 228 C->log()->elem("assert_null reason='field' klass='%d'",
227 229 C->log()->identify(field->type()));
228 230 }
229 231 // If there is going to be a trap, put it at the next bytecode:
230 232 set_bci(iter().next_bci());
231 233 do_null_assert(peek(), T_OBJECT);
232 234 set_bci(iter().cur_bci()); // put it back
233 235 }
234 236
235 237 // If reference is volatile, prevent following memory ops from
236 238 // floating up past the volatile read. Also prevents commoning
237 239 // another volatile read.
238 240 if (field->is_volatile()) {
239 241 // Memory barrier includes bogus read of value to force load BEFORE membar
240 242 insert_mem_bar(Op_MemBarAcquire, ld);
241 243 }
242 244 }
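
Reviewer note: the acquire barrier after a volatile load corresponds, roughly, to C++11 acquire ordering; a hedged analogy (not the emitted IR):

    // Node* ld = make_load(NULL, adr, type, bt, adr_type, is_vol);  // the load
    // insert_mem_bar(Op_MemBarAcquire, ld);  // later ops may not float above it
    //
    // analogous to:
    //   T v = *addr;
    //   std::atomic_thread_fence(std::memory_order_acquire);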
243 245
244 246 void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
245 247 bool is_vol = field->is_volatile();
246 248 // If reference is volatile, prevent following memory ops from
247 249 // floating down past the volatile write. Also prevents commoning
248 250 // another volatile read.
249 251 if (is_vol) insert_mem_bar(Op_MemBarRelease);
250 252
251 253 // Compute address and memory type.
252 254 int offset = field->offset_in_bytes();
253 255 const TypePtr* adr_type = C->alias_type(field)->adr_type();
254 256 Node* adr = basic_plus_adr(obj, obj, offset);
255 257 BasicType bt = field->layout_type();
256 258 // Value to be stored
257 259 Node* val = type2size[bt] == 1 ? pop() : pop_pair();
258 260 // Round doubles before storing
259 261 if (bt == T_DOUBLE) val = dstore_rounding(val);
260 262
261 263 // Store the value.
262 264 Node* store;
263 265 if (bt == T_OBJECT) {
264 266 const TypeOopPtr* field_type;
265 267 if (!field->type()->is_loaded()) {
266 268 field_type = TypeInstPtr::BOTTOM;
267 269 } else {
268 270 field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
269 271 }
270 272 store = store_oop_to_object( control(), obj, adr, adr_type, val, field_type, bt);
271 273 } else {
272 274 store = store_to_memory( control(), adr, val, bt, adr_type, is_vol );
273 275 }
274 276
275 277 // If reference is volatile, prevent following volatile ops from
276 278 // floating up before the volatile write.
277 279 if (is_vol) {
278 280 // First place the specific membar for THIS volatile index. This first
279 281 // membar is dependent on the store, keeping any other membars generated
280 282 // below from floating up past the store.
281 283 int adr_idx = C->get_alias_index(adr_type);
282 284 insert_mem_bar_volatile(Op_MemBarVolatile, adr_idx, store);
283 285
284 286 // Now place a membar for AliasIdxBot for the unknown yet-to-be-parsed
285 287 // volatile alias indices. Skip this if the membar is redundant.
286 288 if (adr_idx != Compile::AliasIdxBot) {
287 289 insert_mem_bar_volatile(Op_MemBarVolatile, Compile::AliasIdxBot, store);
288 290 }
289 291
290 292 // Finally, place alias-index-specific membars for each volatile index
291 293 // that isn't the adr_idx membar. Typically there's only 1 or 2.
292 294 for( int i = Compile::AliasIdxRaw; i < C->num_alias_types(); i++ ) {
293 295 if (i != adr_idx && C->alias_type(i)->is_volatile()) {
294 296 insert_mem_bar_volatile(Op_MemBarVolatile, i, store);
295 297 }
296 298 }
297 299 }
298 300
299 301 // If the field is final, the rules of Java say we are in <init> or <clinit>.
300 302 // Note the presence of writes to final non-static fields, so that we
301 303 // can insert a memory barrier later on to keep the writes from floating
302 304 // out of the constructor.
303 305 if (is_field && field->is_final()) {
304 306 set_wrote_final(true);
305 307 }
306 308 }
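
Reviewer note: for a volatile putfield the barriers bracket the store; a hedged sketch of the emitted shape:

    // MemBarRelease                -- before the store
    // StoreX                       -- the volatile field write
    // MemBarVolatile(adr_idx)      -- pinned on the store
    // MemBarVolatile(AliasIdxBot)  -- covers not-yet-parsed volatile aliases
    // MemBarVolatile(i), one per other alias index i holding a volatile field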
307 309
308 310
309 311 bool Parse::push_constant(ciConstant constant, bool require_constant) {
310 312 switch (constant.basic_type()) {
311 313 case T_BOOLEAN: push( intcon(constant.as_boolean()) ); break;
312 314 case T_INT: push( intcon(constant.as_int()) ); break;
313 315 case T_CHAR: push( intcon(constant.as_char()) ); break;
314 316 case T_BYTE: push( intcon(constant.as_byte()) ); break;
315 317 case T_SHORT: push( intcon(constant.as_short()) ); break;
316 318 case T_FLOAT: push( makecon(TypeF::make(constant.as_float())) ); break;
317 319 case T_DOUBLE: push_pair( makecon(TypeD::make(constant.as_double())) ); break;
318 320 case T_LONG: push_pair( longcon(constant.as_long()) ); break;
319 321 case T_ARRAY:
320 322 case T_OBJECT: {
321 323 // cases:
322 324 // can_be_constant = (oop not scavengable || ScavengeRootsInCode != 0)
323 325 // should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
324 326 // An oop is not scavengable if it is in the perm gen.
325 327 ciObject* oop_constant = constant.as_object();
326 328 if (oop_constant->is_null_object()) {
327 329 push( zerocon(T_OBJECT) );
328 330 break;
329 331 } else if (require_constant || oop_constant->should_be_constant()) {
330 332 push( makecon(TypeOopPtr::make_from_constant(oop_constant, require_constant)) );
331 333 break;
332 334 } else {
333 335 // we cannot inline the oop, but we can use it later to narrow a type
334 336 return false;
335 337 }
336 338 }
337 339 case T_ILLEGAL: {
338 340 // Invalid ciConstant returned due to OutOfMemoryError in the CI
339 341 assert(C->env()->failing(), "otherwise should not see this");
340 342 // These always occur because of object types; we are going to
341 343 // bail out anyway, so make the stack depths match up
342 344 push( zerocon(T_OBJECT) );
343 345 return false;
344 346 }
345 347 default:
346 348 ShouldNotReachHere();
347 349 return false;
348 350 }
349 351
350 352 // success
351 353 return true;
352 354 }
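
Reviewer note: a typical call site, restating the use in do_get_xxx() above (hedged, no new behavior):

    // if (field->is_constant() && field->is_static()) {
    //   if (push_constant(field->constant_value()))  // e.g. intcon(v) for T_INT
    //     return;                                    // constant now on Java stack
    // }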
353 355
354 356
355 357
356 358 //=============================================================================
357 359 void Parse::do_anewarray() {
358 360 bool will_link;
359 361 ciKlass* klass = iter().get_klass(will_link);
360 362
361 363 // Uncommon trap when the class the array contains is not loaded;
362 364 // we need the loaded class for the rest of the graph. Do not
363 365 // initialize the container class (see the Java spec)!
364 366 assert(will_link, "anewarray: typeflow responsibility");
365 367
366 368 ciObjArrayKlass* array_klass = ciObjArrayKlass::make(klass);
367 369 // Check that array_klass object is loaded
368 370 if (!array_klass->is_loaded()) {
369 371 // Generate uncommon_trap for unloaded array_class
370 372 uncommon_trap(Deoptimization::Reason_unloaded,
371 373 Deoptimization::Action_reinterpret,
372 374 array_klass);
373 375 return;
374 376 }
375 377
376 378 kill_dead_locals();
377 379
378 380 const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass);
379 381 Node* count_val = pop();
380 382 Node* obj = new_array(makecon(array_klass_type), count_val, 1);
381 383 push(obj);
382 384 }
383 385
384 386
385 387 void Parse::do_newarray(BasicType elem_type) {
386 388 kill_dead_locals();
387 389
388 390 Node* count_val = pop();
389 391 const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
390 392 Node* obj = new_array(makecon(array_klass), count_val, 1);
391 393 // Push resultant oop onto stack
392 394 push(obj);
393 395 }
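
Reviewer note: the three array entry points map onto javac output as expected (hedged, standard bytecode shapes):

    // new int[n]     =>  newarray T_INT          =>  do_newarray(T_INT)
    // new String[n]  =>  anewarray String        =>  do_anewarray()
    // new int[a][b]  =>  multianewarray [[I, 2   =>  do_multianewarray()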
394 396
395 397 // Expand simple expressions like new int[3][5] and new Object[2][nonConLen].
396 398 // Also handle the degenerate 1-dimensional case of anewarray.
397 399 Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs) {
398 400 Node* length = lengths[0];
399 401 assert(length != NULL, "");
400 402 Node* array = new_array(makecon(TypeKlassPtr::make(array_klass)), length, nargs);
401 403 if (ndimensions > 1) {
402 404 jint length_con = find_int_con(length, -1);
403 405 guarantee(length_con >= 0, "non-constant multianewarray");
404 406 ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
405 407 const TypePtr* adr_type = TypeAryPtr::OOPS;
406 408 const TypeOopPtr* elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
407 409 const intptr_t header = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
408 410 for (jint i = 0; i < length_con; i++) {
409 411 Node* elem = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
410 412 intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
411 413 Node* eaddr = basic_plus_adr(array, offset);
412 414 store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT);
413 415 }
414 416 }
415 417 return array;
416 418 }
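
Reviewer note: the element-address arithmetic deserves a worked example (hedged; header and oop sizes are platform-dependent):

    // Assuming 8-byte heap oops (LogBytesPerHeapOop == 3) and a 16-byte
    // object-array header:
    //   offset(i) = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + (i << 3)
    //             = 16 + 8*i  =>  16, 24, 32 for i = 0, 1, 2
    // so each sub-array oop of `new T[3][n]` is stored 8 bytes past the last.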
417 419
418 420 void Parse::do_multianewarray() {
419 421 int ndimensions = iter().get_dimensions();
420 422
421 423 // the m-dimensional array
422 424 bool will_link;
423 425 ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
424 426 assert(will_link, "multianewarray: typeflow responsibility");
425 427
426 428 // Note: Array classes are always initialized; no is_initialized check.
427 429
428 430 kill_dead_locals();
429 431
430 432 // get the lengths from the stack (first dimension is on top)
431 433 Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
432 434 length[ndimensions] = NULL; // terminating null for make_runtime_call
433 435 int j;
434 436 for (j = ndimensions-1; j >= 0 ; j--) length[j] = pop();
435 437
436 438 // The original expression was of this form: new T[length0][length1]...
437 439 // It is often the case that the lengths are small (except the last).
438 440 // If that happens, use the fast 1-d creator a constant number of times.
439 441 const jint expand_limit = MIN2((juint)MultiArrayExpandLimit, (juint)100);
440 442 jint expand_count = 1; // count of allocations in the expansion
441 443 jint expand_fanout = 1; // running total fanout
442 444 for (j = 0; j < ndimensions-1; j++) {
443 445 jint dim_con = find_int_con(length[j], -1);
444 446 expand_fanout *= dim_con;
445 447 expand_count += expand_fanout; // count the level-J sub-arrays
446 448 if (dim_con <= 0
447 449 || dim_con > expand_limit
448 450 || expand_count > expand_limit) {
449 451 expand_count = 0;
450 452 break;
451 453 }
452 454 }
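 // Worked example (hedged): for `new T[3][5]`, ndimensions == 2 and this loop
 // runs once with dim_con == 3, so expand_fanout == 3 and expand_count == 4
 // (one outer array plus three inner allocations); with the default
 // MultiArrayExpandLimit that stays on the expanded fast path below.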
453 455
454 456 // Can use multianewarray instead of [a]newarray if only one dimension,
455 457 // or if all non-final dimensions are small constants.
456 458 if (ndimensions == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
457 459 Node* obj = NULL;
458 460 // Set the original stack and the reexecute bit for the interpreter
459 461 // to reexecute the multianewarray bytecode if deoptimization happens.
460 462 // Do it unconditionally even for a one-dimensional multianewarray.
461 463 // Note: the reexecute bit will be set in GraphKit::add_safepoint_edges()
462 464 // when AllocateArray node for newarray is created.
463 465 { PreserveReexecuteState preexecs(this);
464 466 _sp += ndimensions;
465 467 // Pass 0 as nargs since uncommon trap code does not need to restore stack.
466 468 obj = expand_multianewarray(array_klass, &length[0], ndimensions, 0);
467 469 } //original reexecute and sp are set back here
468 470 push(obj);
469 471 return;
470 472 }
471 473
472 474 address fun = NULL;
473 475 switch (ndimensions) {
474 476 case 1: ShouldNotReachHere(); break;
475 477 case 2: fun = OptoRuntime::multianewarray2_Java(); break;
476 478 case 3: fun = OptoRuntime::multianewarray3_Java(); break;
477 479 case 4: fun = OptoRuntime::multianewarray4_Java(); break;
478 480 case 5: fun = OptoRuntime::multianewarray5_Java(); break;
479 481 };
480 482 Node* c = NULL;
481 483
482 484 if (fun != NULL) {
483 485 c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
484 486 OptoRuntime::multianewarray_Type(ndimensions),
485 487 fun, NULL, TypeRawPtr::BOTTOM,
486 488 makecon(TypeKlassPtr::make(array_klass)),
487 489 length[0], length[1], length[2],
488 490 length[3], length[4]);
489 491 } else {
490 492 // Create a java array for dimension sizes
491 493 Node* dims = NULL;
492 494 { PreserveReexecuteState preexecs(this);
493 495 _sp += ndimensions;
494 496 Node* dims_array_klass = makecon(TypeKlassPtr::make(ciArrayKlass::make(ciType::make(T_INT))));
495 497 dims = new_array(dims_array_klass, intcon(ndimensions), 0);
496 498
497 499 // Fill it in with values
498 500 for (j = 0; j < ndimensions; j++) {
499 501 Node *dims_elem = array_element_address(dims, intcon(j), T_INT);
500 502 store_to_memory(control(), dims_elem, length[j], T_INT, TypeAryPtr::INTS);
501 503 }
502 504 }
503 505
504 506 c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
505 507 OptoRuntime::multianewarrayN_Type(),
506 508 OptoRuntime::multianewarrayN_Java(), NULL, TypeRawPtr::BOTTOM,
507 509 makecon(TypeKlassPtr::make(array_klass)),
508 510 dims);
509 511 }
510 512
511 513 Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms));
512 514
513 515 const Type* type = TypeOopPtr::make_from_klass_raw(array_klass);
514 516
515 517 // Improve the type: We know it's not null, exact, and of a given length.
516 518 type = type->is_ptr()->cast_to_ptr_type(TypePtr::NotNull);
517 519 type = type->is_aryptr()->cast_to_exactness(true);
518 520
519 521 const TypeInt* ltype = _gvn.find_int_type(length[0]);
520 522 if (ltype != NULL)
521 523 type = type->is_aryptr()->cast_to_size(ltype);
522 524
523 525 // We cannot sharpen the nested sub-arrays, since the top level is mutable.
524 526
525 527 Node* cast = _gvn.transform( new (C, 2) CheckCastPPNode(control(), res, type) );
526 528 push(cast);
527 529
528 530 // Possible improvements:
529 531 // - Make a fast path for small multi-arrays. (W/ implicit init. loops.)
530 532 // - Issue CastII against length[*] values, to TypeInt::POS.
531 533 }
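
Reviewer note: the sharpening steps at the end compose as follows on a concrete shape (hedged sketch):

    // For `new String[n][m]` (non-constant n, so the runtime-call path is used),
    // the raw type java/lang/String[][] is narrowed by:
    //   cast_to_ptr_type(TypePtr::NotNull)  -- the result is never null
    //   cast_to_exactness(true)             -- exactly String[][], not a subtype
    //   cast_to_size(ltype)                 -- only if _gvn knows an int type for n
    // The inner String[] arrays stay unsharpened, since the outer array is mutable.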
(359 lines elided)