< prev index next >

src/hotspot/cpu/arm/frame_arm.cpp

Print this page


   1 /*
   2  * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  40 #include "vmreg_arm.inline.hpp"
  41 #ifdef COMPILER1
  42 #include "c1/c1_Runtime1.hpp"
  43 #include "runtime/vframeArray.hpp"
  44 #endif
  45 #include "prims/methodHandles.hpp"
  46 
  47 #ifdef ASSERT
   48 // Debug-only (ASSERT) sanity hook; intentionally empty here — presumably no
   48 // per-platform register-location validation is needed on ARM. TODO confirm
   48 // against other platform implementations of RegisterMap::check_location_valid.
   48 void RegisterMap::check_location_valid() {
   49 }
  50 #endif
  51 
  52 
  53 // Profiling/safepoint support
  54 
  55 bool frame::safe_for_sender(JavaThread *thread) {
  56   address   sp = (address)_sp;
  57   address   fp = (address)_fp;
  58   address   unextended_sp = (address)_unextended_sp;
  59 
  60   static size_t stack_guard_size = os::uses_stack_guard_pages() ?
  61     (JavaThread::stack_red_zone_size() + JavaThread::stack_yellow_zone_size()) : 0;
  62   size_t usable_stack_size = thread->stack_size() - stack_guard_size;
  63 
  64   // sp must be within the usable part of the stack (not in guards)
  65   bool sp_safe = (sp != NULL &&
  66                  (sp <= thread->stack_base()) &&
  67                  (sp >= thread->stack_base() - usable_stack_size));
  68 
  69   if (!sp_safe) {
  70     return false;
  71   }
  72 
  73   bool unextended_sp_safe = (unextended_sp != NULL &&
  74                              (unextended_sp <= thread->stack_base()) &&
  75                              (unextended_sp >= sp));
  76   if (!unextended_sp_safe) {
  77     return false;
  78   }
  79 
  80   // We know sp/unextended_sp are safe. Only fp is questionable here.
  81 
  82   bool fp_safe = (fp != NULL &&
  83                   (fp <= thread->stack_base()) &&
  84                   fp >= sp);
  85 
  86   if (_cb != NULL ) {
  87 
  88     // First check if frame is complete and tester is reliable
  89     // Unfortunately we can only check frame complete for runtime stubs and nmethod
  90     // other generic buffer blobs are more problematic so we just assume they are
  91     // ok. adapter blobs never have a frame complete and are never ok.
  92 
  93     if (!_cb->is_frame_complete_at(_pc)) {
  94       if (_cb->is_compiled() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
  95         return false;
  96       }
  97     }
  98 
  99     // Could just be some random pointer within the codeBlob
 100     if (!_cb->code_contains(_pc)) {
 101       return false;
 102     }
 103 


 131       // With our calling conventions, the return_address should
 132       // end up being the word on the stack
 133       sender_pc = (address) *(sender_sp - sender_sp_offset + return_addr_offset);
 134     }
 135 
 136     // We must always be able to find a recognizable pc
 137     CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
 138     if (sender_pc == NULL || sender_blob == NULL) {
 139       return false;
 140     }
 141 
 142 
 143     // If the potential sender is the interpreter then we can do some more checking
 144     if (Interpreter::contains(sender_pc)) {
 145 
 146       // FP is always saved in a recognizable place in any code we generate. However
 147       // only if the sender is interpreted/call_stub (c1 too?) are we certain that the saved FP
 148       // is really a frame pointer.
 149 
 150       intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset + link_offset);
 151       bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp > sender_sp);
 152 
 153       if (!saved_fp_safe) {
 154         return false;
 155       }
 156 
 157       // construct the potential sender
 158 
 159       frame sender(sender_sp, saved_fp, sender_pc);
 160 
 161       return sender.is_interpreted_frame_valid(thread);
 162     }
 163 
 164     if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
 165       return false;
 166     }
 167 
 168     // Could just be some random pointer within the codeBlob
 169     if (!sender_blob->code_contains(sender_pc)) {
 170       return false;
 171     }
 172 
 173     // We should never be able to see an adapter if the current frame is something from code cache
 174     if (sender_blob->is_adapter_blob()) {
 175       return false;
 176     }
 177 
 178     // Could be the call_stub
 179     if (StubRoutines::returns_to_call_stub(sender_pc)) {
 180       intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset + link_offset);
 181       bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp >= sender_sp);
 182 
 183       if (!saved_fp_safe) {
 184         return false;
 185       }
 186 
 187       // construct the potential sender
 188 
 189       frame sender(sender_sp, saved_fp, sender_pc);
 190 
 191       // Validate the JavaCallWrapper an entry frame must have
 192       address jcw = (address)sender.entry_frame_call_wrapper();
 193 
 194       bool jcw_safe = (jcw <= thread->stack_base()) && (jcw > (address)sender.fp());
 195 
 196       return jcw_safe;
 197     }
 198 
 199     // If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size
 200     // because the return address counts against the callee's frame.
 201 
 202     if (sender_blob->frame_size() <= 0) {
 203       assert(!sender_blob->is_compiled(), "should count return address at least");
 204       return false;
 205     }
 206 
 207     // We should never be able to see anything here except an nmethod. If something in the
 208     // code cache (current frame) is called by an entity within the code cache that entity
 209     // should not be anything but the call stub (already covered), the interpreter (already covered)
 210     // or an nmethod.
 211 
 212     if (!sender_blob->is_compiled()) {
 213       return false;
 214     }


 484 
 485   if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
 486     return false;
 487   }
 488 
 489   // validate bci/bcp
 490 
 491   address bcp = interpreter_frame_bcp();
 492   if (m->validate_bci_from_bcp(bcp) < 0) {
 493     return false;
 494   }
 495 
 496   // validate ConstantPoolCache*
 497   ConstantPoolCache* cp = *interpreter_frame_cache_addr();
 498   if (MetaspaceObj::is_valid(cp) == false) return false;
 499 
 500   // validate locals
 501 
 502   address locals =  (address) *interpreter_frame_locals_addr();
 503 
 504   if (locals > thread->stack_base() || locals < (address) fp()) return false;
 505 
  506   // We'd have to be pretty unlucky to be misled at this point
 507 
 508   return true;
 509 }
 510 
 511 BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
 512   assert(is_interpreted_frame(), "interpreted frame expected");
 513   Method* method = interpreter_frame_method();
 514   BasicType type = method->result_type();
 515 
 516   intptr_t* res_addr;
 517   if (method->is_native()) {
 518     // Prior to calling into the runtime to report the method_exit both of
 519     // the possible return value registers are saved.
 520     // Return value registers are pushed to the native stack
 521     res_addr = (intptr_t*)sp();
 522 #ifdef __ABI_HARD__
 523     // FP result is pushed onto a stack along with integer result registers
 524     if (type == T_FLOAT || type == T_DOUBLE) {


   1 /*
   2  * Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  40 #include "vmreg_arm.inline.hpp"
  41 #ifdef COMPILER1
  42 #include "c1/c1_Runtime1.hpp"
  43 #include "runtime/vframeArray.hpp"
  44 #endif
  45 #include "prims/methodHandles.hpp"
  46 
  47 #ifdef ASSERT
   48 // Debug-only (ASSERT) sanity hook; intentionally empty here — presumably no
   48 // per-platform register-location validation is needed on ARM. TODO confirm
   48 // against other platform implementations of RegisterMap::check_location_valid.
   48 void RegisterMap::check_location_valid() {
   49 }
  50 #endif
  51 
  52 
  53 // Profiling/safepoint support
  54 
  55 bool frame::safe_for_sender(JavaThread *thread) {
  56   address   sp = (address)_sp;
  57   address   fp = (address)_fp;
  58   address   unextended_sp = (address)_unextended_sp;
  59 
  60   // consider stack guards when trying to determine "safe" stack pointers



  61   // sp must be within the usable part of the stack (not in guards)
  62   if (!thread->is_in_usable_stack(sp)) {




  63     return false;
  64   }
  65 
  66   bool unextended_sp_safe = (unextended_sp != NULL &&
  67                              (unextended_sp < thread->stack_base()) &&
  68                              (unextended_sp >= sp));
  69   if (!unextended_sp_safe) {
  70     return false;
  71   }
  72 
  73   // We know sp/unextended_sp are safe. Only fp is questionable here.
  74 
  75   bool fp_safe = (fp != NULL &&
  76                   (fp < thread->stack_base()) &&
  77                   fp >= sp);
  78 
  79   if (_cb != NULL ) {
  80 
  81     // First check if frame is complete and tester is reliable
  82     // Unfortunately we can only check frame complete for runtime stubs and nmethod
  83     // other generic buffer blobs are more problematic so we just assume they are
  84     // ok. adapter blobs never have a frame complete and are never ok.
  85 
  86     if (!_cb->is_frame_complete_at(_pc)) {
  87       if (_cb->is_compiled() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
  88         return false;
  89       }
  90     }
  91 
  92     // Could just be some random pointer within the codeBlob
  93     if (!_cb->code_contains(_pc)) {
  94       return false;
  95     }
  96 


 124       // With our calling conventions, the return_address should
 125       // end up being the word on the stack
 126       sender_pc = (address) *(sender_sp - sender_sp_offset + return_addr_offset);
 127     }
 128 
 129     // We must always be able to find a recognizable pc
 130     CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
 131     if (sender_pc == NULL || sender_blob == NULL) {
 132       return false;
 133     }
 134 
 135 
 136     // If the potential sender is the interpreter then we can do some more checking
 137     if (Interpreter::contains(sender_pc)) {
 138 
 139       // FP is always saved in a recognizable place in any code we generate. However
 140       // only if the sender is interpreted/call_stub (c1 too?) are we certain that the saved FP
 141       // is really a frame pointer.
 142 
 143       intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset + link_offset);
 144       bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);
 145 
 146       if (!saved_fp_safe) {
 147         return false;
 148       }
 149 
 150       // construct the potential sender
 151 
 152       frame sender(sender_sp, saved_fp, sender_pc);
 153 
 154       return sender.is_interpreted_frame_valid(thread);
 155     }
 156 
 157     if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
 158       return false;
 159     }
 160 
 161     // Could just be some random pointer within the codeBlob
 162     if (!sender_blob->code_contains(sender_pc)) {
 163       return false;
 164     }
 165 
 166     // We should never be able to see an adapter if the current frame is something from code cache
 167     if (sender_blob->is_adapter_blob()) {
 168       return false;
 169     }
 170 
 171     // Could be the call_stub
 172     if (StubRoutines::returns_to_call_stub(sender_pc)) {
 173       intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset + link_offset);
 174       bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp >= sender_sp);
 175 
 176       if (!saved_fp_safe) {
 177         return false;
 178       }
 179 
 180       // construct the potential sender
 181 
 182       frame sender(sender_sp, saved_fp, sender_pc);
 183 
 184       // Validate the JavaCallWrapper an entry frame must have
 185       address jcw = (address)sender.entry_frame_call_wrapper();
 186 
 187       bool jcw_safe = (jcw < thread->stack_base()) && (jcw > (address)sender.fp());
 188 
 189       return jcw_safe;
 190     }
 191 
 192     // If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size
 193     // because the return address counts against the callee's frame.
 194 
 195     if (sender_blob->frame_size() <= 0) {
 196       assert(!sender_blob->is_compiled(), "should count return address at least");
 197       return false;
 198     }
 199 
 200     // We should never be able to see anything here except an nmethod. If something in the
 201     // code cache (current frame) is called by an entity within the code cache that entity
 202     // should not be anything but the call stub (already covered), the interpreter (already covered)
 203     // or an nmethod.
 204 
 205     if (!sender_blob->is_compiled()) {
 206       return false;
 207     }


 477 
 478   if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
 479     return false;
 480   }
 481 
 482   // validate bci/bcp
 483 
 484   address bcp = interpreter_frame_bcp();
 485   if (m->validate_bci_from_bcp(bcp) < 0) {
 486     return false;
 487   }
 488 
 489   // validate ConstantPoolCache*
 490   ConstantPoolCache* cp = *interpreter_frame_cache_addr();
 491   if (MetaspaceObj::is_valid(cp) == false) return false;
 492 
 493   // validate locals
 494 
 495   address locals =  (address) *interpreter_frame_locals_addr();
 496 
 497   if (locals >= thread->stack_base() || locals < (address) fp()) return false;
 498 
  499   // We'd have to be pretty unlucky to be misled at this point
 500 
 501   return true;
 502 }
 503 
 504 BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
 505   assert(is_interpreted_frame(), "interpreted frame expected");
 506   Method* method = interpreter_frame_method();
 507   BasicType type = method->result_type();
 508 
 509   intptr_t* res_addr;
 510   if (method->is_native()) {
 511     // Prior to calling into the runtime to report the method_exit both of
 512     // the possible return value registers are saved.
 513     // Return value registers are pushed to the native stack
 514     res_addr = (intptr_t*)sp();
 515 #ifdef __ABI_HARD__
 516     // FP result is pushed onto a stack along with integer result registers
 517     if (type == T_FLOAT || type == T_DOUBLE) {


< prev index next >