168 large_disp = 0;
169 }
170 } else if (large_disp != 0 && !Address::offset_ok_for_immed(large_disp, shift)) {
171 // index is illegal so replace it with the displacement loaded into a register
172 index = new_pointer_register();
173 __ move(LIR_OprFact::intptrConst(large_disp), index);
174 large_disp = 0;
175 }
176
177 // at this point we either have base + index or base + displacement
178 if (large_disp == 0) {
179 return new LIR_Address(base, index, type);
180 } else {
181 assert(Address::offset_ok_for_immed(large_disp, 0), "must be");
182 return new LIR_Address(base, large_disp, type);
183 }
184 }
185
186
187 LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
188 BasicType type, bool needs_card_mark) {
189 int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
190 int elem_size = type2aelembytes(type);
191 int shift = exact_log2(elem_size);
192
193 LIR_Address* addr;
194 if (index_opr->is_constant()) {
195 addr = new LIR_Address(array_opr,
196 offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
197 } else {
198 if (offset_in_bytes) {
199 LIR_Opr tmp = new_pointer_register();
200 __ add(array_opr, LIR_OprFact::intConst(offset_in_bytes), tmp);
201 array_opr = tmp;
202 offset_in_bytes = 0;
203 }
204 addr = new LIR_Address(array_opr,
205 index_opr,
206 LIR_Address::scale(type),
207 offset_in_bytes, type);
208 }
209 if (needs_card_mark) {
210 // This store will need a precise card mark, so go ahead and
211 // compute the full adddres instead of computing once for the
212 // store and again for the card mark.
213 LIR_Opr tmp = new_pointer_register();
214 __ leal(LIR_OprFact::address(addr), tmp);
215 return new LIR_Address(tmp, type);
216 } else {
217 return addr;
218 }
219 }
220
221 LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
222 LIR_Opr r;
223 if (type == T_LONG) {
224 r = LIR_OprFact::longConst(x);
225 if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
226 LIR_Opr tmp = new_register(type);
227 __ move(r, tmp);
228 return tmp;
229 }
230 } else if (type == T_INT) {
231 r = LIR_OprFact::intConst(x);
232 if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
233 // This is all rather nasty. We don't know whether our constant
234 // is required for a logical or an arithmetic operation, wo we
235 // don't know what the range of valid values is!!
236 LIR_Opr tmp = new_register(type);
237 __ move(r, tmp);
238 return tmp;
288 bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
289
290 if (is_power_of_2(c - 1)) {
291 __ shift_left(left, exact_log2(c - 1), tmp);
292 __ add(tmp, left, result);
293 return true;
294 } else if (is_power_of_2(c + 1)) {
295 __ shift_left(left, exact_log2(c + 1), tmp);
296 __ sub(tmp, left, result);
297 return true;
298 } else {
299 return false;
300 }
301 }
302
303 void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
304 BasicType type = item->type();
305 __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
306 }
307
308 //----------------------------------------------------------------------
309 // visitor functions
310 //----------------------------------------------------------------------
311
312
// Generate LIR for an indexed array store (a[i] = v): emits, as needed,
// a null check, range check, dynamic array-store type check, and GC
// write barriers when the element type is an oop.
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(),"");
  bool needs_range_check = x->compute_needs_range_check();
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  // The store check is only skippable when storing a constant null oop
  // and the store is not being profiled.
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object() ||
                                         x->should_profile());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();

  }
  // Force the value into a register when the store check or boolean
  // masking needs it; otherwise let the backend pick a store-friendly form.
  if (needs_store_check || x->check_boolean()) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // the CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  // emit array address setup early so it schedules better
  // FIXME? No harm in this on aarch64, and it might help
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      // The length is available in a register: compare index directly.
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    // The backend's store_check requires three fresh object temps.
    LIR_Opr tmp1 = new_register(objectType);
    LIR_Opr tmp2 = new_register(objectType);
    LIR_Opr tmp3 = new_register(objectType);

    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
  }

  if (obj_store) {
    // Needs GC write barriers.
    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
    __ move(value.result(), array_addr, null_check_info);
    // Seems to be a precise address
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  } else {
    // Non-oop store; boolean elements may be masked to 0/1 first.
    LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
    __ move(result, array_addr, null_check_info);
  }
}
388
389 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
390 assert(x->is_pinned(),"");
391 LIRItem obj(x->obj(), this);
392 obj.load_item();
393
394 set_no_result(x);
395
396 // "lock" stores the address of the monitor stack slot, so this is not an oop
397 LIR_Opr lock = new_register(T_INT);
398 // Need a scratch register for biased locking
399 LIR_Opr scratch = LIR_OprFact::illegalOpr;
400 if (UseBiasedLocking) {
401 scratch = new_register(T_INT);
402 }
403
404 CodeEmitInfo* info_for_exception = NULL;
405 if (x->needs_null_check()) {
406 info_for_exception = state_for(x);
407 }
408 // this CodeEmitInfo must not have the xhandlers because here the
754 LIRItem left(x->x(), this);
755 LIRItem right(x->y(), this);
756 ValueTag tag = x->x()->type()->tag();
757 if (tag == longTag) {
758 left.set_destroys_register();
759 }
760 left.load_item();
761 right.load_item();
762 LIR_Opr reg = rlock_result(x);
763
764 if (x->x()->type()->is_float_kind()) {
765 Bytecodes::Code code = x->op();
766 __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
767 } else if (x->x()->type()->tag() == longTag) {
768 __ lcmp2int(left.result(), right.result(), reg);
769 } else {
770 Unimplemented();
771 }
772 }
773
// Unsafe compare-and-swap intrinsic: atomically store val into the field
// at obj+offset if the field currently equals cmp.  The int result is
// derived below from the condition value the backend leaves in r8.
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj (x->argument_at(0), this); // object
  LIRItem offset(x->argument_at(1), this); // offset of field
  LIRItem cmp (x->argument_at(2), this); // value to compare with field
  LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp

  assert(obj.type()->tag() == objectTag, "invalid type");

  // In 64bit the type can be long, sparc doesn't have this assert
  // assert(offset.type()->tag() == intTag, "invalid type");

  assert(cmp.type()->tag() == type->tag(), "invalid type");
  assert(val.type()->tag() == type->tag(), "invalid type");

  // get address of field
  obj.load_item();
  offset.load_nonconstant();
  val.load_item();
  cmp.load_item();

  LIR_Address* a;
  if(offset.result()->is_constant()) {
    jlong c = offset.result()->as_jlong();
    if ((jlong)((jint)c) == c) {
      // Offset fits in 32 bits: use it as an immediate displacement.
      a = new LIR_Address(obj.result(),
                          (jint)c,
                          as_BasicType(type));
    } else {
      // Offset too wide for a displacement: materialize it in a register.
      LIR_Opr tmp = new_register(T_LONG);
      __ move(offset.result(), tmp);
      a = new LIR_Address(obj.result(),
                          tmp,
                          as_BasicType(type));
    }
  } else {
    a = new LIR_Address(obj.result(),
                        offset.result(),
                        0,
                        as_BasicType(type));
  }
  // The CAS needs the flat field address in a single register.
  LIR_Opr addr = new_pointer_register();
  __ leal(LIR_OprFact::address(a), addr);

  if (type == objectType) { // Write-barrier needed for Object fields.
    // Do the pre-write barrier, if any.
    pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
  }

  LIR_Opr result = rlock_result(x);

  LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience
  if (type == objectType)
    __ cas_obj(addr, cmp.result(), val.result(), new_register(T_INT), new_register(T_INT),
               result);
  else if (type == intType)
    __ cas_int(addr, cmp.result(), val.result(), ill, ill);
  else if (type == longType)
    __ cas_long(addr, cmp.result(), val.result(), ill, ill);
  else {
    ShouldNotReachHere();
  }

  // Turn the condition value in r8 into the 0/1 int result.
  __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result);

  if (type == objectType) { // Write-barrier needed for Object fields.
    // Seems to be precise
    post_barrier(addr, val.result());
  }
}
845
846 void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
847 switch (x->id()) {
848 case vmIntrinsics::_dabs:
849 case vmIntrinsics::_dsqrt: {
850 assert(x->number_of_arguments() == 1, "wrong type");
851 LIRItem value(x->argument_at(0), this);
852 value.load_item();
853 LIR_Opr dst = rlock_result(x);
854
855 switch (x->id()) {
856 case vmIntrinsics::_dsqrt: {
857 __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
858 break;
859 }
860 case vmIntrinsics::_dabs: {
861 __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
862 break;
863 }
1341 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1342
1343 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1344 CodeEmitInfo* info) {
1345 __ volatile_store_mem_reg(value, address, info);
1346 }
1347
1348 void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
1349 CodeEmitInfo* info) {
1350 // 8179954: We need to make sure that the code generated for
1351 // volatile accesses forms a sequentially-consistent set of
1352 // operations when combined with STLR and LDAR. Without a leading
1353 // membar it's possible for a simple Dekker test to fail if loads
1354 // use LD;DMB but stores use STLR. This can happen if C2 compiles
1355 // the stores in one method and C1 compiles the loads in another.
1356 if (! UseBarriersForVolatile) {
1357 __ membar();
1358 }
1359
1360 __ volatile_load_mem_reg(address, result, info);
1361 }
1362
1363 void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
1364 BasicType type, bool is_volatile) {
1365 LIR_Address* addr = new LIR_Address(src, offset, type);
1366 __ load(addr, dst);
1367 }
1368
1369
1370 void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
1371 BasicType type, bool is_volatile) {
1372 LIR_Address* addr = new LIR_Address(src, offset, type);
1373 bool is_obj = (type == T_ARRAY || type == T_OBJECT);
1374 if (is_obj) {
1375 // Do the pre-write barrier, if any.
1376 pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
1377 true /* do_load */, false /* patch */, NULL);
1378 __ move(data, addr);
1379 assert(src->is_register(), "must be register");
1380 // Seems to be a precise address
1381 post_barrier(LIR_OprFact::address(addr), data);
1382 } else {
1383 __ move(data, addr);
1384 }
1385 }
1386
// Unsafe getAndSet / getAndAdd intrinsic: atomically exchange (or add to)
// the field at object+offset, producing the previous value in the result.
void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem value(x->value(), this);

  src.load_item();
  off.load_nonconstant();

  // We can cope with a constant increment in an xadd
  if (! (x->is_add()
         && value.is_constant()
         && can_inline_as_constant(x->value()))) {
    value.load_item();
  }

  LIR_Opr dst = rlock_result(x, type);
  LIR_Opr data = value.result();
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  LIR_Opr offset = off.result();

  // NOTE(review): if the input aliases the result register, copy it first
  // so the exchange below does not observe its own output — presumably a
  // backend constraint of xadd/xchg; confirm.
  if (data == dst) {
    LIR_Opr tmp = new_register(data->type());
    __ move(data, tmp);
    data = tmp;
  }

  LIR_Address* addr;
  if (offset->is_constant()) {
    // Constant offsets are encoded as a 32-bit displacement.
    jlong l = offset->as_jlong();
    assert((jlong)((jint)l) == l, "offset too large for constant");
    jint c = (jint)l;
    addr = new LIR_Address(src.result(), c, type);
  } else {
    addr = new LIR_Address(src.result(), offset, type);
  }

  LIR_Opr tmp = new_register(T_INT);
  LIR_Opr ptr = LIR_OprFact::illegalOpr;

  if (x->is_add()) {
    __ xadd(LIR_OprFact::address(addr), data, dst, tmp);
  } else {
    if (is_obj) {
      // Do the pre-write barrier, if any.
      ptr = new_pointer_register();
      __ add(src.result(), off.result(), ptr);
      pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
                  true /* do_load */, false /* patch */, NULL);
    }
    __ xchg(LIR_OprFact::address(addr), data, dst, tmp);
    if (is_obj) {
      post_barrier(ptr, data);
    }
  }
}
|
168 large_disp = 0;
169 }
170 } else if (large_disp != 0 && !Address::offset_ok_for_immed(large_disp, shift)) {
171 // index is illegal so replace it with the displacement loaded into a register
172 index = new_pointer_register();
173 __ move(LIR_OprFact::intptrConst(large_disp), index);
174 large_disp = 0;
175 }
176
177 // at this point we either have base + index or base + displacement
178 if (large_disp == 0) {
179 return new LIR_Address(base, index, type);
180 } else {
181 assert(Address::offset_ok_for_immed(large_disp, 0), "must be");
182 return new LIR_Address(base, large_disp, type);
183 }
184 }
185
186
187 LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
188 BasicType type) {
189 int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
190 int elem_size = type2aelembytes(type);
191 int shift = exact_log2(elem_size);
192
193 LIR_Address* addr;
194 if (index_opr->is_constant()) {
195 addr = new LIR_Address(array_opr,
196 offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
197 } else {
198 if (offset_in_bytes) {
199 LIR_Opr tmp = new_pointer_register();
200 __ add(array_opr, LIR_OprFact::intConst(offset_in_bytes), tmp);
201 array_opr = tmp;
202 offset_in_bytes = 0;
203 }
204 addr = new LIR_Address(array_opr,
205 index_opr,
206 LIR_Address::scale(type),
207 offset_in_bytes, type);
208 }
209 return addr;
210 }
211
212 LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
213 LIR_Opr r;
214 if (type == T_LONG) {
215 r = LIR_OprFact::longConst(x);
216 if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
217 LIR_Opr tmp = new_register(type);
218 __ move(r, tmp);
219 return tmp;
220 }
221 } else if (type == T_INT) {
222 r = LIR_OprFact::intConst(x);
223 if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
224 // This is all rather nasty. We don't know whether our constant
225 // is required for a logical or an arithmetic operation, wo we
226 // don't know what the range of valid values is!!
227 LIR_Opr tmp = new_register(type);
228 __ move(r, tmp);
229 return tmp;
279 bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
280
281 if (is_power_of_2(c - 1)) {
282 __ shift_left(left, exact_log2(c - 1), tmp);
283 __ add(tmp, left, result);
284 return true;
285 } else if (is_power_of_2(c + 1)) {
286 __ shift_left(left, exact_log2(c + 1), tmp);
287 __ sub(tmp, left, result);
288 return true;
289 } else {
290 return false;
291 }
292 }
293
294 void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
295 BasicType type = item->type();
296 __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
297 }
298
299 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
300 LIR_Opr tmp1 = new_register(objectType);
301 LIR_Opr tmp2 = new_register(objectType);
302 LIR_Opr tmp3 = new_register(objectType);
303 __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
304 }
305
306 //----------------------------------------------------------------------
307 // visitor functions
308 //----------------------------------------------------------------------
309
310 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
311 assert(x->is_pinned(),"");
312 LIRItem obj(x->obj(), this);
313 obj.load_item();
314
315 set_no_result(x);
316
317 // "lock" stores the address of the monitor stack slot, so this is not an oop
318 LIR_Opr lock = new_register(T_INT);
319 // Need a scratch register for biased locking
320 LIR_Opr scratch = LIR_OprFact::illegalOpr;
321 if (UseBiasedLocking) {
322 scratch = new_register(T_INT);
323 }
324
325 CodeEmitInfo* info_for_exception = NULL;
326 if (x->needs_null_check()) {
327 info_for_exception = state_for(x);
328 }
329 // this CodeEmitInfo must not have the xhandlers because here the
675 LIRItem left(x->x(), this);
676 LIRItem right(x->y(), this);
677 ValueTag tag = x->x()->type()->tag();
678 if (tag == longTag) {
679 left.set_destroys_register();
680 }
681 left.load_item();
682 right.load_item();
683 LIR_Opr reg = rlock_result(x);
684
685 if (x->x()->type()->is_float_kind()) {
686 Bytecodes::Code code = x->op();
687 __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
688 } else if (x->x()->type()->tag() == longTag) {
689 __ lcmp2int(left.result(), right.result(), reg);
690 } else {
691 Unimplemented();
692 }
693 }
694
695 LIR_Opr LIRGenerator::cas(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
696 LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience
697 new_value.load_item();
698 cmp_value.load_item();
699 LIR_Opr result = new_register(T_INT);
700 if (type == T_OBJECT || type == T_ARRAY) {
701 __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
702 } else if (type == T_INT) {
703 __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
704 } else if (type == T_LONG) {
705 __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
706 } else {
707 ShouldNotReachHere();
708 Unimplemented();
709 }
710 __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result);
711 return result;
712 }
713
714 LIR_Opr LIRGenerator::swap(BasicType type, LIR_Opr addr, LIRItem& value) {
715 bool is_oop = type == T_OBJECT || type == T_ARRAY;
716 LIR_Opr result = new_register(type);
717 value.load_item();
718 assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
719 LIR_Opr tmp = new_register(T_INT);
720 __ xchg(addr, value.result(), result, tmp);
721 return result;
722 }
723
724 LIR_Opr LIRGenerator::add(BasicType type, LIR_Opr addr, LIRItem& value) {
725 LIR_Opr result = new_register(type);
726 value.load_item();
727 assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
728 LIR_Opr tmp = new_register(T_INT);
729 __ xadd(addr, value.result(), result, tmp);
730 return result;
731 }
732
733 void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
734 switch (x->id()) {
735 case vmIntrinsics::_dabs:
736 case vmIntrinsics::_dsqrt: {
737 assert(x->number_of_arguments() == 1, "wrong type");
738 LIRItem value(x->argument_at(0), this);
739 value.load_item();
740 LIR_Opr dst = rlock_result(x);
741
742 switch (x->id()) {
743 case vmIntrinsics::_dsqrt: {
744 __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
745 break;
746 }
747 case vmIntrinsics::_dabs: {
748 __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
749 break;
750 }
1228 void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }
1229
1230 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1231 CodeEmitInfo* info) {
1232 __ volatile_store_mem_reg(value, address, info);
1233 }
1234
1235 void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
1236 CodeEmitInfo* info) {
1237 // 8179954: We need to make sure that the code generated for
1238 // volatile accesses forms a sequentially-consistent set of
1239 // operations when combined with STLR and LDAR. Without a leading
1240 // membar it's possible for a simple Dekker test to fail if loads
1241 // use LD;DMB but stores use STLR. This can happen if C2 compiles
1242 // the stores in one method and C1 compiles the loads in another.
1243 if (! UseBarriersForVolatile) {
1244 __ membar();
1245 }
1246
1247 __ volatile_load_mem_reg(address, result, info);
1248 }
|