
src/cpu/aarch64/vm/interp_masm_aarch64.cpp

 172   get_cache_index_at_bcp(index, bcp_offset, index_size);
 173   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
 174   // convert from field index to ConstantPoolCacheEntry
 175   // aarch64 already has the cache in rcpool so there is no need to
 176   // install it in cache. instead we pre-add the indexed offset to
 177   // rcpool and return it in cache. All clients of this method need to
 178   // be modified accordingly.
 179   add(cache, rcpool, index, Assembler::LSL, 5);
 180 }
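
The LSL 5 in the add above is the entry size folded into a shift: the assert pins sizeof(ConstantPoolCacheEntry) at 4 * wordSize, which is 32 bytes with 8-byte words, so scaling the index by the entry size is a left shift by 5. A minimal standalone sketch of that arithmetic, assuming a 64-bit wordSize of 8 (illustration only, not HotSpot code):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t wordSize   = 8;              // bytes per machine word on a 64-bit VM
      const uintptr_t entry_size = 4 * wordSize;   // the asserted sizeof(ConstantPoolCacheEntry)
      const uintptr_t index      = 3;              // example cache index decoded from the bytecode stream

      // add(cache, rcpool, index, Assembler::LSL, 5) computes rcpool + (index << 5),
      // which is exactly rcpool + index * entry_size:
      assert(index * entry_size == index << 5);
      return 0;
    }
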
 181 
 182 
 183 void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
 184                                                                         Register index,
 185                                                                         Register bytecode,
 186                                                                         int byte_no,
 187                                                                         int bcp_offset,
 188                                                                         size_t index_size) {
 189   get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
 190   // We use a 32-bit load here since the layout of 64-bit words on
 191   // little-endian machines allows us to do that.
 192   // n.b. unlike x86, cache already includes the index offset
 193   ldrw(bytecode, Address(cache,
 194                          ConstantPoolCache::base_offset()
 195                          + ConstantPoolCacheEntry::indices_offset()));
 196   const int shift_count = (1 + byte_no) * BitsPerByte;
 197   ubfx(bytecode, bytecode, shift_count, BitsPerByte);
 198 }
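
The ubfx above is an unsigned bit-field extract: it pulls the one byte of the 32-bit indices word that holds the resolved bytecode for byte_no. A small sketch of the same extraction in plain C++, assuming the usual HotSpot layout where the constant pool index occupies the low half-word and byte_no is 1 or 2; the indices value is made up for the example:

    #include <cassert>
    #include <cstdint>

    // ubfx(dst, src, lsb, width) extracts an unsigned bit field:
    // (src >> lsb) & ((1 << width) - 1).
    static uint32_t ubfx32(uint32_t src, unsigned lsb, unsigned width) {
      return (src >> lsb) & ((1u << width) - 1u);
    }

    int main() {
      const unsigned BitsPerByte = 8;
      // Hypothetical indices word: constant pool index 0x0042 in the low half-word,
      // bytecode 1 == 0xB2 at bits 16..23, bytecode 2 == 0xB5 at bits 24..31.
      const uint32_t indices = 0xB5B20042u;
      assert(ubfx32(indices, (1 + 1) * BitsPerByte, BitsPerByte) == 0xB2);  // byte_no == 1
      assert(ubfx32(indices, (1 + 2) * BitsPerByte, BitsPerByte) == 0xB5);  // byte_no == 2
      return 0;
    }
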
 199 
 200 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
 201                                                                Register tmp,
 202                                                                int bcp_offset,
 203                                                                size_t index_size) {
 204   assert(cache != tmp, "must use different register");
 205   get_cache_index_at_bcp(tmp, bcp_offset, index_size);
 206   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
 207   // convert from field index to ConstantPoolCacheEntry index
 208   // and from word offset to byte offset
 209   assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
 210   ldr(cache, Address(rfp, frame::interpreter_frame_cache_offset * wordSize));
 211   // skip past the header
 212   add(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
 213   add(cache, cache, tmp, Assembler::LSL, 2 + LogBytesPerWord);  // construct pointer to cache entry
 214 }
 215 




 172   get_cache_index_at_bcp(index, bcp_offset, index_size);
 173   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
 174   // convert from field index to ConstantPoolCacheEntry
 175   // aarch64 already has the cache in rcpool so there is no need to
 176   // install it in cache. instead we pre-add the indexed offset to
 177   // rcpool and return it in cache. All clients of this method need to
 178   // be modified accordingly.
 179   add(cache, rcpool, index, Assembler::LSL, 5);
 180 }
 181 
 182 
 183 void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
 184                                                                         Register index,
 185                                                                         Register bytecode,
 186                                                                         int byte_no,
 187                                                                         int bcp_offset,
 188                                                                         size_t index_size) {
 189   get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
 190   // We use a 32-bit load here since the layout of 64-bit words on
 191   // little-endian machines allows us to do that.
 192   // n.b. unlike x86, cache already includes the index offset
 193   lea(bytecode, Address(cache,
 194                          ConstantPoolCache::base_offset()
 195                          + ConstantPoolCacheEntry::indices_offset()));
 196   ldarw(bytecode, bytecode);
 197   const int shift_count = (1 + byte_no) * BitsPerByte;
 198   ubfx(bytecode, bytecode, shift_count, BitsPerByte);
 199 }
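
This is the substantive change in this rewritten version: the listing above read the indices word with a plain ldrw, while here the address is formed with lea and the load is done with ldarw, a load-acquire. Acquire ordering guarantees that a thread which observes the resolved bytecode also observes everything the resolving thread stored before publishing it. A minimal C++ analogue of that ordering (illustration only, not HotSpot code; the struct and field names are made up):

    #include <atomic>
    #include <cstdint>
    #include <thread>

    struct CacheEntrySketch {             // hypothetical stand-in for ConstantPoolCacheEntry
      std::atomic<uint32_t> indices{0};   // resolved bytecode is published here
      uint64_t f1 = 0;                    // payload that must be visible before the bytecode
    };

    int main() {
      CacheEntrySketch e;

      std::thread resolver([&] {
        e.f1 = 0xDEADBEEF;                                        // publish the payload first...
        e.indices.store(0x00B20000u, std::memory_order_release);  // ...then the resolved bytecode
      });

      // Old code: plain ldrw, i.e. a relaxed load, which does not order the read of f1.
      // New code: ldarw, i.e. a load-acquire, which guarantees that once the bytecode
      // is seen as resolved, the earlier store to f1 is visible too.
      while (e.indices.load(std::memory_order_acquire) == 0) { /* spin */ }
      uint64_t payload = e.f1;            // guaranteed to read 0xDEADBEEF
      (void)payload;

      resolver.join();
      return 0;
    }
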
 200 
 201 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
 202                                                                Register tmp,
 203                                                                int bcp_offset,
 204                                                                size_t index_size) {
 205   assert(cache != tmp, "must use different register");
 206   get_cache_index_at_bcp(tmp, bcp_offset, index_size);
 207   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
 208   // convert from field index to ConstantPoolCacheEntry index
 209   // and from word offset to byte offset
 210   assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");
 211   ldr(cache, Address(rfp, frame::interpreter_frame_cache_offset * wordSize));
 212   // skip past the header
 213   add(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
 214   add(cache, cache, tmp, Assembler::LSL, 2 + LogBytesPerWord);  // construct pointer to cache entry
 215 }
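
The last two add instructions implement the comment's conversion: load the cache pointer saved in the interpreter frame, step over the ConstantPoolCache header, then scale the field index by the entry size; 2 + LogBytesPerWord is 5 on a 64-bit VM, matching the 32-byte entry asserted earlier. A sketch of that address computation, with a made-up header size standing in for ConstantPoolCache::base_offset():

    #include <cstdint>

    // Hypothetical stand-in for the computation above; base_offset_bytes is an
    // invented value, not the real ConstantPoolCache::base_offset() constant.
    uintptr_t cache_entry_pointer(uintptr_t cache_base,      // ldr from the frame slot at rfp
                                  uintptr_t field_index) {   // from get_cache_index_at_bcp
      const unsigned  LogBytesPerWord   = 3;    // 8-byte words on a 64-bit VM
      const uintptr_t base_offset_bytes = 16;   // hypothetical header size
      // skip past the header, then add field_index << 5 (the 32-byte entry size)
      return cache_base + base_offset_bytes + (field_index << (2 + LogBytesPerWord));
    }

    int main() {
      // Entry 3 of a cache at 0x1000 with the hypothetical 16-byte header:
      // 0x1000 + 16 + 3 * 32 == 0x1070.
      return cache_entry_pointer(0x1000, 3) == 0x1070 ? 0 : 1;
    }
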
 216 