 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_VTABLESTUBS_HPP
#define SHARE_VM_CODE_VTABLESTUBS_HPP

#include "asm/macroAssembler.hpp"
#include "code/vmreg.hpp"
#include "memory/allocation.hpp"
// A VtableStub holds an individual code stub for a (vtable index, #args) pair,
// used for dispatch through either vtables or itables.
// There is a one-to-one relationship between a VtableStub and such a pair.

// A word on VtableStub sizing:
// Such a vtable/itable stub consists of the instance data
// and an immediately following CodeBuffer.
// Unfortunately, the required space for the code buffer varies, depending on
// the setting of compile time macros (PRODUCT, ASSERT, ...) and of command line
// parameters. Actual data may have an influence on the size as well.
//
// A simple approximation for the VtableStub size would be to just take a value
// "large enough" for all circumstances - a worst case estimate.
// As there can exist many stubs - and they never go away - we certainly don't
// want to waste more code cache space than absolutely necessary.
//
// We need a different approach which, as far as possible, should be independent
// from or adaptive to code size variations. These variations may be caused by
// changed compile time or run time switches as well as by changed emitter code.
//
// Here is the idea:
// For the first stub we generate, we allocate a "large enough" code buffer.
// Once all instructions are emitted, we know the actual size of the stub.
// Remembering that size allows us to allocate a tightly matching code buffer
// for all subsequent stubs. That covers all "static variance", i.e. all variance
// that is due to compile time macros, command line parameters, machine capabilities,
// and other influences which are immutable for the life span of the VM.
//
// Life isn't always that easy. Code size may depend on actual data, "load constant"
// being an example of that. All code segments with such "dynamic variance" require
// additional care. We need to know or estimate the worst case code size for each
// such segment. With that knowledge, we can maintain a "slop counter" in the
// platform-specific stub emitters. It accumulates the difference between worst-case
// and actual code size. When the stub is fully generated, the actual stub size is
// adjusted (increased) by the slop counter value.
//
// As a result, we allocate all but the first code buffers with the same, tightly matching size.
//
class VtableStub {
 private:
  friend class VtableStubs;

  static address _chunk;             // For allocation
  static address _chunk_end;         // For allocation
  static VMReg   _receiver_location; // Where to find receiver

  VtableStub*    _next;              // Pointer to next entry in hash table
  const short    _index;             // vtable index
  short          _ame_offset;        // Where an AbstractMethodError might occur
  short          _npe_offset;        // Where a NullPointerException might occur
  bool           _is_vtable_stub;    // True if vtable stub, false if itable stub
  /* code follows here */            // The vtableStub code

  void* operator new(size_t size, int code_size) throw();
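  // Descriptive note: the stub is allocated in one piece from the stub chunk,
  // sizeof(VtableStub) bytes of instance data followed immediately by
  // code_size bytes of code, which is why code_begin() is simply
  // (address)(this + 1). The allocator itself is defined in vtableStubs.cpp.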

  VtableStub(bool is_vtable_stub, int index)
        : _next(NULL), _index(index), _ame_offset(-1), _npe_offset(-1),
          _is_vtable_stub(is_vtable_stub) {}
  VtableStub* next() const                       { return _next; }
  int index() const                              { return _index; }
  static VMReg receiver_location()               { return _receiver_location; }
  void set_next(VtableStub* n)                   { _next = n; }

 public:
  address code_begin() const                     { return (address)(this + 1); }
  address code_end() const                       { return code_begin() + code_size_limit(_is_vtable_stub); }
  address entry_point() const                    { return code_begin(); }
  static int entry_offset()                      { return sizeof(class VtableStub); }

  bool matches(bool is_vtable_stub, int index) const {
    return _index == index && _is_vtable_stub == is_vtable_stub;
  }
  bool contains(address pc) const                { return code_begin() <= pc && pc < code_end(); }

 private:
  void set_exception_points(address npe_addr, address ame_addr) {
    _npe_offset = npe_addr - code_begin();
    _ame_offset = ame_addr - code_begin();
    assert(is_abstract_method_error(ame_addr),   "offset must be correct");
    assert(is_null_pointer_exception(npe_addr),  "offset must be correct");
    assert(!is_abstract_method_error(npe_addr),  "offset must be correct");
    assert(!is_null_pointer_exception(ame_addr), "offset must be correct");
  }

  // platform-dependent routines
  static int  pd_code_alignment();
  // CNC: Removed because vtable stubs are now made with an ideal graph
  // static bool pd_disregard_arg_size();

  static void align_chunk() {
    uintptr_t off = (uintptr_t)( _chunk + sizeof(VtableStub) ) % pd_code_alignment();
    if (off != 0)  _chunk += pd_code_alignment() - off;
  }
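  // Illustrative example (hypothetical numbers): with pd_code_alignment() == 32
  // and sizeof(VtableStub) == 16, a _chunk of 0x1000 would place the code at
  // 0x1010 (off == 0x10), so _chunk advances by 0x10 to 0x1010 and the next
  // stub's code then begins at the 32-byte aligned address 0x1020.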

 public:
  // Query
  bool is_itable_stub()                          { return !_is_vtable_stub; }
  bool is_vtable_stub()                          { return  _is_vtable_stub; }
  bool is_abstract_method_error(address epc)     { return epc == code_begin() + _ame_offset; }
  bool is_null_pointer_exception(address epc)    { return epc == code_begin() + _npe_offset; }

  static int  code_size_limit(bool is_vtable_stub);
  static void check_and_set_size_limit(bool is_vtable_stub,
                                       int  code_size,
                                       int  padding);

  void print_on(outputStream* st) const;
  void print() const                             { print_on(tty); }

};


// VtableStubs creates the code stubs for compiled calls through vtables.
// There is one stub per (vtable index, args_size) pair, and the stubs are
// never deallocated. They don't need to be GCed because they contain no oops.
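//
// A minimal usage sketch (hypothetical call site; actual callers are the
// compiled-call resolution paths in the runtime):
//
//   address stub_entry = VtableStubs::find_vtable_stub(vtable_index);
//   // stub_entry is the code entry point; the stub loads the receiver's
//   // klass, indexes its vtable, and jumps to the selected method.
//   VtableStub* s = VtableStubs::stub_containing(stub_entry);
//   assert(s != NULL && s->is_vtable_stub(), "sanity");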

class VtableStubs : AllStatic {
 public:                                         // N must be public (some compilers need this for _table)
  enum {
    N    = 256,                                  // size of stub table; must be power of two
    mask = N - 1
  };

 private:
  static VtableStub* _table[N];                  // table of existing stubs
  static int         _number_of_vtable_stubs;    // number of stubs created so far (for statistics)

  static VtableStub* create_vtable_stub(int vtable_index);
  static VtableStub* create_itable_stub(int vtable_index);
  static VtableStub* lookup            (bool is_vtable_stub, int vtable_index);
  static void        enter             (bool is_vtable_stub, int vtable_index, VtableStub* s);
  static inline uint hash              (bool is_vtable_stub, int vtable_index);
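  // A minimal sketch of one plausible hash (the real definition lives in
  // vtableStubs.cpp and may differ): fold the stub kind into the index and
  // mask the result down to a table slot, e.g.
  //   return (is_vtable_stub + vtable_index) & mask;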
  static address     find_stub        (bool is_vtable_stub, int vtable_index);
  static void        bookkeeping(MacroAssembler* masm, outputStream* out, VtableStub* s,
                                 address npe_addr, address ame_addr, bool is_vtable_stub,
                                 int index, int slop_bytes, int slop32);

 public:
  static address find_vtable_stub(int vtable_index) { return find_stub(true,  vtable_index); }
  static address find_itable_stub(int itable_index) { return find_stub(false, itable_index); }

  // SAPJVM PJ 2007-09-24 introduce flexible code buffer size for the stubs
  static int _vtab_stub_size;
  static int _itab_stub_size;

  static VtableStub* entry_point(address pc);              // vtable stub entry point for a pc
  static bool        contains(address pc);                 // is pc within any stub?
  static VtableStub* stub_containing(address pc);          // stub containing pc or NULL
  static int         number_of_vtable_stubs() { return _number_of_vtable_stubs; }
  static void        initialize();
  static void        vtable_stub_do(void f(VtableStub*));  // iterates over all vtable stubs
};

#endif // SHARE_VM_CODE_VTABLESTUBS_HPP