graph TB
PerformCall(PerformCall)-->|Interpreter|ArtInterpreterToInterpreterBridge(interpreter::ArtInterpreterToInterpreterBridge)
ArtInterpreterToInterpreterBridge-->ExecuteSwitchImplCpp(interpreter::ExecuteSwitchImplCpp)
ExecuteSwitchImplCpp-->DoInvoke(case Instruction::xxx: interpreter::DoInvoke, find called ArtMethod)
DoInvoke-->DoCallCommon(interpreter::DoCallCommon, prepare shadow frame)
DoCallCommon-->PerformCall
ArtMethod::Invoke-->art_quick_invoke_stub(art_quick_invoke_stub: function in quick_entry_point.S)
artQuickToInterpreterBridge(artQuickToInterpreterBridge, prepare shadow frame)-->EnterInterpreterFromEntryPoint(interpreter::EnterInterpreterFromEntryPoint)
EnterInterpreterFromEntryPoint-->ExecuteSwitchImplCpp
PerformCall-->|CompiledCode|ArtInterpreterToCompiledCodeBridge(interpreter::ArtInterpreterToCompiledCodeBridge)
ArtInterpreterToCompiledCodeBridge-->ArtMethod::Invoke
compiledCode-->|Interpreter|artQuickToInterpreterBridge
art_quick_invoke_stub-->|ART_METHOD_QUICK_CODE_OFFSET_32|IsJniMethod{Is JNI method?}
IsJniMethod-->|no|entry_point_from_quick_compiled_code_(entry_point_from_quick_compiled_code_ linkCode时设置)
IsJniMethod-->|yes|art_quick_generic_jni_trampoline_汇编
art_quick_generic_jni_trampoline_汇编-->artQuickGenericJniTrampoline
artQuickGenericJniTrampoline-->data_(data_RegisterNative时设置)
data_-->IsJniDlsymLookupStub{IsJniDlsymLookupStub?}
IsJniDlsymLookupStub-->|no|nativeCode
IsJniDlsymLookupStub-->|yes|artFindNativeMethod
entry_point_from_quick_compiled_code_-->IsCompiled{Is Compiled?}
IsCompiled-->|yes|compiledCode
IsCompiled-->|no|art_quick_to_interpreter_bridge汇编
art_quick_to_interpreter_bridge汇编-->artQuickToInterpreterBridge
sequenceDiagram
Trampoline->>Trampoline:artQuickGenericJniTrampoline
activate Trampoline
Trampoline->>Trampoline:void const* nativeCode = artMethod->GetEntryPointFromJni()
activate Trampoline
Note over Trampoline,artFindNativeMethod: if(nativeCode == GetJniDlsymLookupStub())
Trampoline->>artFindNativeMethod:artFindNativeMethod
activate artFindNativeMethod
artFindNativeMethod->>JavaVMExt: void* native_code = soa.Vm()->FindCodeForNativeMethod(method);
artFindNativeMethod->>artFindNativeMethod:method->RegisterNative(native_code);
activate JavaVMExt
JavaVMExt->>JavaVMExt: for (const auto& lib : libraries_)
Note right of JavaVMExt: library->FindSymbol(jni_short_name, arg_shorty);
Note right of JavaVMExt: library->FindSymbol(jni_long_name, arg_shorty);
deactivate JavaVMExt
deactivate artFindNativeMethod
deactivate Trampoline
deactivate Trampoline
sequenceDiagram
FindMethodFromCode->>ClassLinker:ResolveMethod
ClassLinker->>DexCache:getResolvedMethod
DexCache->>ClassLinker: return
ClassLinker->>ClassLinker: if resolvedMethod is null,ResolveMethod
activate ClassLinker
ClassLinker->>ClassLinker: ResolveType
activate ClassLinker
Note right of ClassLinker: resolve the declaring class
deactivate ClassLinker
ClassLinker->>ClassLinker: FindResolvedMethod
activate ClassLinker
ClassLinker->>ClassLinker: klass->FindClassMethod
activate ClassLinker
deactivate ClassLinker
ClassLinker->>DexCache: dex_cache->SetResolvedMethod
deactivate ClassLinker
deactivate ClassLinker
ResolveType 会触发类的加载流程;类在加载/链接阶段的 LinkCode 步骤中会为每个 ArtMethod 设置其 entry point(方法入口点)。
所有Trampoline code都是一段汇编代码编写的函数,这段汇编代码函数内部一般会跳转到一个由更高级的编程语言(C++)实现的函数。
// Excerpt from art/runtime/thread.h: the per-thread pointer-sized TLS values
// that hold the runtime entry-point (trampoline) tables.
class Thread{
......
struct PACKED(sizeof(void*)) tls_ptr_sized_values {
......
// Trampoline table for JNI methods. It contains a single function pointer,
// pDlsymLookup; the corresponding trampoline code is implemented in
// jni_entrypoints_x86.S. When a JNI method has not yet been registered,
// this pointer is invoked to locate the target native function.
JniEntryPoints jni_entrypoints;
// Trampoline table for non-JNI methods: 132 function pointers in total,
// whose trampoline code lives in quick_entrypoints_x86.S. Every member is a
// function pointer (see quick_entrypoints_list.h). They expose runtime
// services that compiled machine code may need; the compiler emits call
// instructions that jump to these entry points.
QuickEntryPoints quick_entrypoints;
......
} tlsPtr_;
}
art/runtime/interpreter/interpreter.cc
// Bridge used when an interpreted method calls another method that will also be
// interpreted: push the callee's shadow frame, run it in the interpreter, pop.
// NOTE(review): excerpt — the closing brace of the function is elided here.
void ArtInterpreterToInterpreterBridge(Thread* self,
const CodeItemDataAccessor& accessor,
ShadowFrame* shadow_frame,
JValue* result) {
self->PushShadowFrame(shadow_frame);
ArtMethod* method = shadow_frame->GetMethod();
if (LIKELY(!shadow_frame->GetMethod()->IsNative())) {
// Main path: execute the callee in the interpreter (Execute below).
result->SetJ(Execute(self, accessor, *shadow_frame, JValue()).GetJ());
} else {
// We don't expect to be asked to interpret native code (which is entered via a JNI compiler
// generated stub) except during testing and image writing.
CHECK(!Runtime::Current()->IsStarted());
ObjPtr<mirror::Object> receiver = is_static ? nullptr : shadow_frame->GetVRegReference(0);
uint32_t* args = shadow_frame->GetVRegArgs(is_static ? 0 : 1);
UnstartedRuntime::Jni(self, shadow_frame->GetMethod(), receiver.Ptr(), args, result);
}
self->PopShadowFrame();
// Core interpreter entry: if JIT-compiled code for this method is available
// (and we are allowed to leave the interpreter), jump into compiled code via
// ArtInterpreterToCompiledCodeBridge; otherwise fall through to the switch
// interpreter. NOTE(review): excerpt — `method` is used at the JIT check
// before its declaration shown below; in the real source it is defined
// earlier in the function.
static inline JValue Execute(
Thread* self,
const CodeItemDataAccessor& accessor,
ShadowFrame& shadow_frame,
JValue result_register,
bool stay_in_interpreter = false) REQUIRES_SHARED(Locks::mutator_lock_) {
if (LIKELY(shadow_frame.GetDexPC() == 0)) { // Entering the method, but not via deoptimization.
if (!stay_in_interpreter) {
jit::Jit* jit = Runtime::Current()->GetJit();
if (jit != nullptr) {
jit->MethodEntered(self, shadow_frame.GetMethod());
if (jit->CanInvokeCompiledCode(method)) {
JValue result;
// Pop the shadow frame before calling into compiled code.
self->PopShadowFrame();
//Calculate the offset of the first input reg. The input registers are in the high regs.
//It's ok to access the code item here since JIT code will have been touched by the
// interpreter and compiler already.
uint16_t arg_offset = accessor.RegistersSize() - accessor.InsSize();
ArtInterpreterToCompiledCodeBridge(self, nullptr, &shadow_frame, arg_offset, &result);
// Push the shadow frame back as the caller will expect it.
self->PushShadowFrame(&shadow_frame);
return result;
}
}
}
}
// method is the method of the current (callee) shadow frame.
ArtMethod* method = shadow_frame.GetMethod();
return ExecuteSwitchImpl<false, false>(self, accessor, shadow_frame, result_register,
false);
art/runtime/interpreter/interpreter_switch_impl.h/cc
// Wrapper around the switch interpreter which ensures we can unwind through it.
// Packs the interpreter state into a SwitchImplContext, then calls the assembly
// shim ExecuteSwitchImplAsm with a pointer to ExecuteSwitchImplCpp, which does
// the actual instruction dispatch.
template<bool do_access_check, bool transaction_active>
ALWAYS_INLINE JValue ExecuteSwitchImpl(Thread* self, const CodeItemDataAccessor& accessor,
ShadowFrame& shadow_frame, JValue result_register,
bool interpret_one_instruction)
REQUIRES_SHARED(Locks::mutator_lock_) {
SwitchImplContext ctx {
.self = self,
.accessor = accessor,
.shadow_frame = shadow_frame,
.result_register = result_register,
.interpret_one_instruction = interpret_one_instruction,
.result = JValue(),
};
// Main dispatch target: the C++ switch interpreter implementation.
void* impl = reinterpret_cast<void*>(&ExecuteSwitchImplCpp<do_access_check, transaction_active>);
const uint16_t* dex_pc = ctx.accessor.Insns();
ExecuteSwitchImplAsm(&ctx, impl, dex_pc);
return ctx.result;
}
// The switch-based interpreter loop: fetch each dex instruction, switch on its
// opcode, and execute it. NOTE(review): excerpt — only the INVOKE_DIRECT and
// INVOKE_STATIC cases are shown here; the real switch handles every opcode,
// and the function's closing brace is elided.
template<bool do_access_check, bool transaction_active>
void ExecuteSwitchImplCpp(SwitchImplContext* ctx) {
Thread* self = ctx->self;
const CodeItemDataAccessor& accessor = ctx->accessor;
ShadowFrame& shadow_frame = ctx->shadow_frame;
JValue result_register = ctx->result_register;
bool interpret_one_instruction = ctx->interpret_one_instruction;
constexpr bool do_assignability_check = do_access_check;
self->VerifyStack();
uint32_t dex_pc = shadow_frame.GetDexPC();
const auto* const instrumentation = Runtime::Current()->GetInstrumentation();
const uint16_t* const insns = accessor.Insns();
const Instruction* inst = Instruction::At(insns + dex_pc);
uint16_t inst_data;
jit::Jit* jit = Runtime::Current()->GetJit();
do {
dex_pc = inst->GetDexPc(insns);
shadow_frame.SetDexPC(dex_pc);
TraceExecution(shadow_frame, inst, dex_pc);
inst_data = inst->Fetch16(0);
switch (inst->Opcode(inst_data)) {
// invoke-direct: DoInvoke resolves the callee and performs the call.
case Instruction::INVOKE_DIRECT: {
PREAMBLE();
bool success = DoInvoke<kDirect, false, do_access_check>(
self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_STATIC: {
PREAMBLE();
bool success = DoInvoke<kStatic, false, do_access_check>(
self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
}
} while (!interpret_one_instruction);
// Record where we stopped.
shadow_frame.SetDexPC(inst->GetDexPc(insns));
ctx->result = result_register;
return;
art/runtime/interpreter/interpreter_common.h
// Handles all invoke-XXX/range instructions except for invoke-polymorphic[/range].
// Returns true on success, otherwise throws an exception and returns false.
// Resolves the callee ArtMethod from the dex method index, then delegates to
// DoCall. NOTE(review): excerpt — the function's closing brace is elided.
template<InvokeType type, bool is_range, bool do_access_check>
static inline bool DoInvoke(Thread* self,
ShadowFrame& shadow_frame,
const Instruction* inst,
uint16_t inst_data,
JValue* result) {
// Make sure to check for async exceptions before anything else.
if (UNLIKELY(self->ObserveAsyncException())) {
return false;
}
const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
ObjPtr<mirror::Object> receiver =
(type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
// The method that initiated this call (the caller).
ArtMethod* sf_method = shadow_frame.GetMethod();
// Resolve the method being called (the callee).
ArtMethod* const called_method = FindMethodFromCode<type, do_access_check>(
method_idx, &receiver, sf_method, self);
// The shadow frame should already be pushed, so we don't need to update it.
jit::Jit* jit = Runtime::Current()->GetJit();
if (jit != nullptr && (type == kVirtual || type == kInterface)) {
jit->InvokeVirtualOrInterface(receiver, sf_method, shadow_frame.GetDexPC(), called_method);
}
// TODO: Remove the InvokeVirtualOrInterface instrumentation, as it was only used by the JIT.
if (type == kVirtual || type == kInterface) {
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasInvokeVirtualOrInterfaceListeners())) {
instrumentation->InvokeVirtualOrInterface(
self, receiver.Ptr(), sf_method, shadow_frame.GetDexPC(), called_method);
}
}
return DoCall<is_range, do_access_check>(called_method, self, shadow_frame, inst, inst_data, result);
art/runtime/interpreter/interpreter_common.cc
// Decodes the argument registers of an invoke instruction (range vs. non-range
// encodings) and forwards everything to DoCallCommon, which builds the callee's
// shadow frame and performs the call.
template<bool is_range, bool do_assignability_check>
bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, uint16_t inst_data, JValue* result) {
// Argument word count.
const uint16_t number_of_inputs =
(is_range) ? inst->VRegA_3rc(inst_data) : inst->VRegA_35c(inst_data);
// TODO: find a cleaner way to separate non-range and range information without duplicating
// code.
uint32_t arg[Instruction::kMaxVarArgRegs] = {}; // only used in invoke-XXX.
uint32_t vregC = 0;
if (is_range) {
vregC = inst->VRegC_3rc();
} else {
vregC = inst->VRegC_35c();
inst->GetVarArgs(arg, inst_data);
}
return DoCallCommon<is_range, do_assignability_check>(
called_method, self, shadow_frame,
result, number_of_inputs, arg, vregC);
}
art/runtime/interpreter/interpreter_common.cc
// Builds the callee's shadow frame, decides whether the callee should run in
// the interpreter or via its compiled-code entrypoint, and hands off to
// PerformCall. NOTE(review): excerpt — argument-copying code and the closing
// brace are elided (the `......` markers).
template <bool is_range,
bool do_assignability_check>
static inline bool DoCallCommon(ArtMethod* called_method,
Thread* self,
ShadowFrame& shadow_frame,
JValue* result,
uint16_t number_of_inputs,
uint32_t (&arg)[Instruction::kMaxVarArgRegs],
uint32_t vregC) {
// Compute method information.
CodeItemDataAccessor accessor(called_method->DexInstructionData());
// Number of registers for the callee's call frame.
uint16_t num_regs;
// Test whether to use the interpreter or compiler entrypoint, and save that result to pass to
// PerformCall. A deoptimization could occur at any time, and we shouldn't change which
// entrypoint to use once we start building the shadow frame.
// For unstarted runtimes, always use the interpreter entrypoint. This fixes the case where we are
// doing cross compilation. Note that GetEntryPointFromQuickCompiledCode doesn't use the image
// pointer size here and this may case an overflow if it is called from the compiler. b/62402160
const bool use_interpreter_entrypoint = !Runtime::Current()->IsStarted() ||
ClassLinker::ShouldUseInterpreterEntrypoint(
called_method,
called_method->GetEntryPointFromQuickCompiledCode());
......
// Allocate shadow frame on the stack.
// called_method is the method being called (the callee).
const char* old_cause = self->StartAssertNoThreadSuspension("DoCallCommon");
ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
CREATE_SHADOW_FRAME(num_regs, &shadow_frame, called_method, /* dex pc */ 0);
ShadowFrame* new_shadow_frame = shadow_frame_unique_ptr.get();
......
PerformCall(self,
accessor,
shadow_frame.GetMethod(),
first_dest_reg,
new_shadow_frame,
result,
use_interpreter_entrypoint);
art/runtime/common_dex_operations.h
// Central dispatch point for a method call made from the interpreter:
// route the callee either back into the interpreter or into its compiled
// code, depending on use_interpreter_entrypoint. For a runtime that has not
// started yet (e.g. during image writing), use the unstarted-runtime path.
inline void PerformCall(Thread* self,
const CodeItemDataAccessor& accessor,
ArtMethod* caller_method,
const size_t first_dest_reg,
ShadowFrame* callee_frame,
JValue* result,
bool use_interpreter_entrypoint)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (LIKELY(Runtime::Current()->IsStarted())) {
if (use_interpreter_entrypoint) {
interpreter::ArtInterpreterToInterpreterBridge(self, accessor, callee_frame, result);
} else {
interpreter::ArtInterpreterToCompiledCodeBridge(
self, caller_method, callee_frame, first_dest_reg, result);
}
} else {
interpreter::UnstartedRuntime::Invoke(self, accessor, callee_frame, result, first_dest_reg);
}
}
// Bridge from interpreted code into compiled code: notify the JIT of the
// transition, then call ArtMethod::Invoke, which enters the quick invoke stub.
// NOTE(review): excerpt — argument setup (`......`) and the closing brace are
// elided.
void ArtInterpreterToCompiledCodeBridge(Thread* self,
ArtMethod* caller,
ShadowFrame* shadow_frame,
uint16_t arg_offset,
JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = shadow_frame->GetMethod();
......
jit::Jit* jit = Runtime::Current()->GetJit();
if (jit != nullptr && caller != nullptr) {
jit->NotifyInterpreterToCompiledCodeTransition(self, caller);
}
method->Invoke(self, shadow_frame->GetVRegArgs(arg_offset),
(shadow_frame->NumberOfVRegs() - arg_offset) * sizeof(uint32_t),
result, method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty());
art/runtime/art_method.cc
// Enter the method through the architecture-specific quick invoke stub
// (static vs. instance variants). NOTE(review): excerpt — the surrounding
// function body and the `namespace art` closing brace are elided.
void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result,
const char* shorty) {
if (!IsStatic()) {
(*art_quick_invoke_stub)(this, args, args_size, self, result, shorty);
} else {
// Implemented in art/runtime/arch/x86/quick_entrypoints_x86.S.
(*art_quick_invoke_static_stub)(this, args, args_size, self, result, shorty);
}
namespace art {
// Assembly stubs that set up the native frame and jump to the method's
// quick-code entry point.
extern "C" void art_quick_invoke_stub(ArtMethod*, uint32_t*, uint32_t, Thread*, JValue*, const char*);
extern "C" void art_quick_invoke_static_stub(ArtMethod*, uint32_t*, uint32_t, Thread*, JValue*, const char*);
art/runtime/arch/x86/quick_entrypoints_x86.S
// x86 invoke stub excerpt: load the ArtMethod* and call through its quick-code
// entry point field.
DEFINE_FUNCTION art_quick_invoke_stub
mov 20(%ebp), %eax // move method pointer into eax
// Call the method. A not-yet-compiled Java method goes to
// art_quick_to_interpreter_bridge; a JNI method goes to
// art_quick_generic_jni_trampoline.
call *ART_METHOD_QUICK_CODE_OFFSET_32(%eax)
// Interpreter bridge stub: forwards into the C++ artQuickToInterpreterBridge.
DEFINE_FUNCTION art_quick_to_interpreter_bridge
PUSH eax // pass method
call SYMBOL(artQuickToInterpreterBridge) // (method, Thread*, SP)
art/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
// Entered from the art_quick_to_interpreter_bridge stub when a method has no
// compiled code: build a shadow frame from the quick (native) stack arguments,
// ensure the declaring class is initialized for static methods, then run the
// method in the interpreter. NOTE(review): excerpt — setup code, the shorty
// computation, and the function tail are elided.
extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
REQUIRES_SHARED(Locks::mutator_lock_) {
......
JValue result;
if (UNLIKELY(deopt_frame != nullptr)) {
HandleDeoptimization(&result, method, deopt_frame, &fragment);
} else {
const char* old_cause = self->StartAssertNoThreadSuspension(
"Building interpreter shadow frame");
uint16_t num_regs = accessor.RegistersSize();
// No last shadow coming from quick.
ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
CREATE_SHADOW_FRAME(num_regs, /* link */ nullptr, method, /* dex pc */ 0);
ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
size_t first_arg_reg = accessor.RegistersSize() - accessor.InsSize();
// Copy the quick-ABI arguments into the shadow frame's vregs.
BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
shadow_frame, first_arg_reg);
shadow_frame_builder.VisitArguments();
const bool needs_initialization =
method->IsStatic() && !method->GetDeclaringClass()->IsInitialized();
// Push a transition back into managed code onto the linked list in thread.
self->PushManagedStackFragment(&fragment);
self->PushShadowFrame(shadow_frame);
self->EndAssertNoThreadSuspension(old_cause);
if (needs_initialization) {
// Ensure static method's class is initialized.
StackHandleScope<1> hs(self);
Handle<mirror::Class> h_class(hs.NewHandle(shadow_frame->GetMethod()->GetDeclaringClass()));
if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
DCHECK(Thread::Current()->IsExceptionPending())
<< shadow_frame->GetMethod()->PrettyMethod();
self->PopManagedStackFragment(fragment);
return 0;
}
}
result = interpreter::EnterInterpreterFromEntryPoint(self, accessor, shadow_frame);
}
// Enter the interpreter from a quick entrypoint: check for native-stack
// overflow, notify the JIT of the compiled-code -> interpreter transition,
// then run the method via Execute (which leads to ExecuteSwitchImplCpp).
JValue EnterInterpreterFromEntryPoint(Thread* self, const CodeItemDataAccessor& accessor,
ShadowFrame* shadow_frame) {
DCHECK_EQ(self, Thread::Current());
bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEndForInterpreter(implicit_check))) {
ThrowStackOverflowError(self);
return JValue();
}
jit::Jit* jit = Runtime::Current()->GetJit();
if (jit != nullptr) {
jit->NotifyCompiledCodeToInterpreterTransition(self, shadow_frame->GetMethod());
}
return Execute(self, accessor, *shadow_frame, JValue());
}
// Generic JNI trampoline excerpt: the assembly stub calls the C++
// artQuickGenericJniTrampoline, which resolves the native code pointer (via
// artFindNativeMethod if the method is still bound to the dlsym-lookup stub),
// then the stub calls the resolved native function.
// NOTE(review): excerpt — `called` is defined earlier in the real function;
// surrounding code and closing braces are elided.
DEFINE_FUNCTION art_quick_generic_jni_trampoline
call SYMBOL(artQuickGenericJniTrampoline) // (Thread*, sp)
// On x86 there are no registers passed, so nothing to pop here.
// Native call.
call *%eax // call the concrete native implementation
extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp)
REQUIRES_SHARED(Locks::mutator_lock_) {
// Retrieve the stored native code.
void const* nativeCode = called->GetEntryPointFromJni();
// There are two cases for the content of nativeCode:
// 1) Pointer to the native function.
// 2) Pointer to the trampoline for native code binding.
// In the second case, we need to execute the binding and continue with the actual native function
// pointer.
// after find the native method pointer, set it to nativeCode variable to avoid find native method again
DCHECK(nativeCode != nullptr);
if (nativeCode == GetJniDlsymLookupStub()) {
#if defined(__arm__) || defined(__aarch64__)
nativeCode = artFindNativeMethod();
#else
nativeCode = artFindNativeMethod(self);
#endif
art/runtime/entrypoints/jni/jni_entrypoints.cc
// Resolve (dlsym-lookup) the native implementation of the JNI method currently
// being invoked, then register it on the ArtMethod so subsequent calls go
// straight to the native code. NOTE(review): excerpt — the matching #if for
// the stray #endif below (an arch-specific signature variant) is elided.
extern "C" const void* artFindNativeMethod(Thread* self) {
DCHECK_EQ(self, Thread::Current());
#endif
Locks::mutator_lock_->AssertNotHeld(self); // We come here as Native.
ScopedObjectAccess soa(self);
// Get the method at the top of the current thread's managed stack.
ArtMethod* method = self->GetCurrentMethod(nullptr);
DCHECK(method != nullptr);
// Lookup symbol address for method, on failure we'll return null with an exception set,
// otherwise we return the address of the method we found.
void* native_code = soa.Vm()->FindCodeForNativeMethod(method);
if (native_code == nullptr) {
self->AssertPendingException();
return nullptr;
}
// Register so that future calls don't come here
return method->RegisterNative(native_code);
}
art/runtime/java_vm_ext.cc
// Search all loaded native libraries (then TI agent libraries) for the symbol
// implementing the given native method; throws UnsatisfiedLinkError if none
// is found.
void* JavaVMExt::FindCodeForNativeMethod(ArtMethod* m) {
CHECK(m->IsNative());
mirror::Class* c = m->GetDeclaringClass();
// If this is a static method, it could be called before the class has been initialized.
CHECK(c->IsInitializing()) << c->GetStatus() << " " << m->PrettyMethod();
std::string detail;
Thread* const self = Thread::Current();
void* native_method = libraries_->FindNativeMethod(self, m, detail);
if (native_method == nullptr) {
// Lookup JNI native methods from native TI Agent libraries. See runtime/ti/agent.h for more
// information. Agent libraries are searched for native methods after all jni libraries.
native_method = FindCodeForNativeMethodInAgents(m);
}
// Throwing can cause libraries_lock to be reacquired.
if (native_method == nullptr) {
LOG(ERROR) << detail;
self->ThrowNewException("Ljava/lang/UnsatisfiedLinkError;", detail.c_str());
}
return native_method;
}
// See section 11.3 "Linking Native Methods" of the JNI spec.
// Computes the short and long JNI mangled names for the method and looks them
// up across the loaded shared libraries via FindNativeMethodInternal.
void* FindNativeMethod(Thread* self, ArtMethod* m, std::string& detail)
REQUIRES(!Locks::jni_libraries_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
// e.g. "Java_com_example_myapplication_loadLibrary_jni_XHCoreJni_getSignatureKey"
std::string jni_short_name(m->JniShortName());
// e.g. "Java_com_example_myapplication_loadLibrary_jni_XHCoreJni_getSignatureKey__" + mangled signature
std::string jni_long_name(m->JniLongName());
const char* shorty = m->GetShorty();
{
// Go to suspended since dlsym may block for a long time if other threads are using dlopen.
ScopedThreadSuspension sts(self, kNative);
void* native_code = FindNativeMethodInternal(self,
declaring_class_loader_allocator,
shorty,
jni_short_name,
jni_long_name);
if (native_code != nullptr) {
return native_code;
}
}
return nullptr;
}
// Iterate over all loaded shared libraries belonging to the method's defining
// ClassLoader and dlsym the JNI short name, falling back to the long name.
// Returns the function pointer, or nullptr if no library exports the symbol.
void* FindNativeMethodInternal(Thread* self,
void* declaring_class_loader_allocator,
const char* shorty,
const std::string& jni_short_name,
const std::string& jni_long_name)
REQUIRES(!Locks::jni_libraries_lock_)
REQUIRES(!Locks::mutator_lock_) {
MutexLock mu(self, *Locks::jni_libraries_lock_);
for (const auto& lib : libraries_) {
SharedLibrary* const library = lib.second;
// Use the allocator address for class loader equality to avoid unnecessary weak root decode.
if (library->GetClassLoaderAllocator() != declaring_class_loader_allocator) {
// We only search libraries loaded by the appropriate ClassLoader.
continue;
}
// Try the short name then the long name...
const char* arg_shorty = library->NeedsNativeBridge() ? shorty : nullptr;
void* fn = library->FindSymbol(jni_short_name, arg_shorty);
if (fn == nullptr) {
fn = library->FindSymbol(jni_long_name, arg_shorty);
}
if (fn != nullptr) {
VLOG(jni) << "[Found native code for " << jni_long_name
<< " in \"" << library->GetPath() << "\"]";
return fn;
}
}
return nullptr;
}
art/runtime/entrypoints/entrypoint_utils-inl.h
// Resolve the ArtMethod for a method index referenced from code, going through
// the ClassLinker. For non-static calls the receiver is wrapped in a handle so
// it survives any GC triggered during resolution.
// NOTE(review): excerpt — the rest of the function (devirtualization per
// invoke type, error paths) and the closing brace are elided.
template<InvokeType type, bool access_check>
inline ArtMethod* FindMethodFromCode(uint32_t method_idx,
ObjPtr<mirror::Object>* this_object,
ArtMethod* referrer,
Thread* self) {
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
constexpr ClassLinker::ResolveMode resolve_mode =
access_check ? ClassLinker::ResolveMode::kCheckICCEAndIAE
: ClassLinker::ResolveMode::kNoChecks;
ArtMethod* resolved_method;
if (type == kStatic) {
resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
} else {
StackHandleScope<1> hs(self);
HandleWrapperObjPtr<mirror::Object> h_this(hs.NewHandleWrapper(this_object));
resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
}
art/runtime/class_linker-inl.h
// Fast-path resolution: first consult the referrer's DexCache; only on a cache
// miss fall back to the slow-path ResolveMethod overload (which resolves the
// declaring class and searches it). NOTE(review): excerpt — the tail of the
// function (returning resolved_method) and closing brace are elided.
template <ClassLinker::ResolveMode kResolveMode>
inline ArtMethod* ClassLinker::ResolveMethod(Thread* self,
uint32_t method_idx,
ArtMethod* referrer,
InvokeType type) {
// We do not need the read barrier for getting the DexCache for the initial resolved method
// lookup as both from-space and to-space copies point to the same native resolved methods array.
ArtMethod* resolved_method = referrer->GetDexCache<kWithoutReadBarrier>()->GetResolvedMethod(
method_idx, image_pointer_size_);
if (UNLIKELY(resolved_method == nullptr)) {
referrer = referrer->GetInterfaceMethodIfProxy(image_pointer_size_);
ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
StackHandleScope<2> hs(self);
Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(referrer->GetDexCache()));
Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
// Slow path: resolve via dex cache + class loader (see class_linker.cc).
resolved_method = ResolveMethod<kResolveMode>(method_idx,
h_dex_cache,
h_class_loader,
referrer,
type);
}
art/runtime/mirror/dex_cache-inl.h
// Look up a previously-resolved method in the DexCache's resolved-methods
// array; returns nullptr if the slot does not hold this method index.
inline ArtMethod* DexCache::GetResolvedMethod(uint32_t method_idx, PointerSize ptr_size) {
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
auto pair = GetNativePairPtrSize(GetResolvedMethods(), MethodSlotIndex(method_idx), ptr_size);
return pair.GetObjectForIndex(method_idx);
}
art/runtime/class_linker.cc
// Slow-path method resolution: resolve the declaring class (which can trigger
// class loading), search it for the method, and report ICCE/NoSuchMethodError
// on failure. NOTE(review): excerpt — the leading dex-cache check (`....`) and
// the trailing closing braces are elided.
template <ClassLinker::ResolveMode kResolveMode>
ArtMethod* ClassLinker::ResolveMethod(uint32_t method_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
ArtMethod* referrer,
InvokeType type) {
....
// The method was not in the DexCache, resolve the declaring class.
klass = ResolveType(method_id.class_idx_, dex_cache, class_loader);
resolved = FindResolvedMethod(klass, dex_cache.Get(), class_loader.Get(), method_idx);
// If we found a method, check for incompatible class changes.
if (LIKELY(resolved != nullptr) &&
LIKELY(kResolveMode == ResolveMode::kNoChecks ||
!resolved->CheckIncompatibleClassChange(type))) {
return resolved;
} else {
// If we had a method, or if we can find one with another lookup type,
// it's an incompatible-class-change error.
if (resolved == nullptr) {
resolved = FindIncompatibleMethod(klass, dex_cache.Get(), class_loader.Get(), method_idx);
}
if (resolved != nullptr) {
ThrowIncompatibleClassChangeError(type, resolved->GetInvokeType(), resolved, referrer);
} else {
// We failed to find the method (using all lookup types), so throw a NoSuchMethodError.
const char* name = dex_file.StringDataByIdx(method_id.name_idx_);
const Signature signature = dex_file.GetMethodSignature(method_id);
ThrowNoSuchMethodError(type, klass, name, signature);
}
Thread::Current()->AssertPendingException();
return nullptr;
// Search the resolved class for the method (interface vs. class lookup), apply
// the hidden-API policy, and cache a successful result in the DexCache.
// NOTE(review): excerpt — the function's closing brace is elided.
ArtMethod* ClassLinker::FindResolvedMethod(ObjPtr<mirror::Class> klass,
ObjPtr<mirror::DexCache> dex_cache,
ObjPtr<mirror::ClassLoader> class_loader,
uint32_t method_idx) {
// Search for the method using dex_cache and method_idx. The Class::Find*Method()
// functions can optimize the search if the dex_cache is the same as the DexCache
// of the class, with fall-back to name and signature search otherwise
ArtMethod* resolved = nullptr;
if (klass->IsInterface()) {
resolved = klass->FindInterfaceMethod(dex_cache, method_idx, image_pointer_size_);
} else {
resolved = klass->FindClassMethod(dex_cache, method_idx, image_pointer_size_);
}
// Hidden-API policy may forbid linking against this member.
if (resolved != nullptr &&
hiddenapi::GetMemberAction(
resolved, class_loader, dex_cache, hiddenapi::kLinking) == hiddenapi::kDeny) {
resolved = nullptr;
}
if (resolved != nullptr) {
// In case of jmvti, the dex file gets verified before being registered, so first
// check if it's registered before checking class tables.
const DexFile& dex_file = *dex_cache->GetDexFile();
DCHECK(!IsDexFileRegistered(Thread::Current(), dex_file) ||
FindClassTable(Thread::Current(), dex_cache) == ClassTableForClassLoader(class_loader))
<< "DexFile referrer: " << dex_file.GetLocation()
<< " ClassLoader: " << DescribeLoaders(class_loader, "");
// Be a good citizen and update the dex cache to speed subsequent calls.
dex_cache->SetResolvedMethod(method_idx, resolved, image_pointer_size_);
// Disable the following invariant check as the verifier breaks it. b/73760543
// const DexFile::MethodId& method_id = dex_file.GetMethodId(method_idx);
// DCHECK(LookupResolvedType(method_id.class_idx_, dex_cache, class_loader) != nullptr)
// << "Method: " << resolved->PrettyMethod() << ", "
// << "Class: " << klass->PrettyClass() << " (" << klass->GetStatus() << "), "
// << "DexFile referrer: " << dex_file.GetLocation();
}
return resolved;
art/runtime/native/java_lang_reflect_Method.cc
// JNI registration table binding java.lang.reflect.Method.invoke to Method_invoke.
static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(Method, invoke, "(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;"),
};
// Native implementation of java.lang.reflect.Method.invoke: delegates to
// reflection.cc's InvokeMethod.
static jobject Method_invoke(JNIEnv* env, jobject javaMethod, jobject javaReceiver,
jobjectArray javaArgs) {
ScopedFastNativeObjectAccess soa(env);
return InvokeMethod(soa, javaMethod, javaReceiver, javaArgs);
}
art/runtime/reflection.cc
// Reflective invocation: decode the Method/receiver, verify the receiver type,
// devirtualize to the actual implementation, then invoke via the argument
// array. NOTE(review): heavily elided excerpt — accessibility checks, argument
// marshaling, static-method handling and the boxed return are omitted.
jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaMethod,
jobject javaReceiver, jobject javaArgs, size_t num_frames) {
ObjPtr<mirror::Executable> executable = soa.Decode<mirror::Executable>(javaMethod);
const bool accessible = executable->IsAccessible();
ArtMethod* m = executable->GetArtMethod();
ObjPtr<mirror::Class> declaring_class = m->GetDeclaringClass();
// Check that the receiver is non-null and an instance of the field's declaring class.
receiver = soa.Decode<mirror::Object>(javaReceiver);
if (!VerifyObjectIsClass(receiver, declaring_class)) {
return nullptr;
}
// Find the actual implementation of the virtual method.
m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(m, kRuntimePointerSize);
InvokeWithArgArray(soa, m, &arg_array, &result, shorty);
}
art/runtime/mirror/class-inl.h
// Devirtualize a method against this (receiver) class: direct methods are
// returned as-is; interface methods go through the IMT/iftable lookup; other
// virtual methods are resolved through the vtable.
inline ArtMethod* Class::FindVirtualMethodForVirtualOrInterface(ArtMethod* method,
PointerSize pointer_size) {
if (method->IsDirect()) {
return method;
}
if (method->GetDeclaringClass()->IsInterface() && !method->IsCopied()) {
return FindVirtualMethodForInterface(method, pointer_size);
}
return FindVirtualMethodForVirtual(method, pointer_size);
}
// Invoke a method with an already-built argument array; with CheckJNI enabled,
// validate the arguments first. Ends up in ArtMethod::Invoke.
void InvokeWithArgArray(const ScopedObjectAccessAlreadyRunnable& soa,
ArtMethod* method, ArgArray* arg_array, JValue* result,
const char* shorty)
REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t* args = arg_array->GetArray();
if (UNLIKELY(soa.Env()->IsCheckJniEnabled())) {
CheckMethodArguments(soa.Vm(), method->GetInterfaceMethodIfProxy(kRuntimePointerSize), args);
}
method->Invoke(soa.Self(), args, arg_array->GetNumBytes(), result, shorty);
}
// Decide whether a callee should be executed in the interpreter rather than
// through its quick compiled-code entrypoint (debugger forcing, interpret-only
// mode, missing code, debuggable runtimes, pending async exceptions...).
// NOTE(review): excerpt — the function's opening brace is elided.
bool ClassLinker::ShouldUseInterpreterEntrypoint(ArtMethod* method, const void* quick_code)
// method->IsNative(): true for JNI methods (declared with the `native` keyword in Java).
if (UNLIKELY(method->IsNative() || method->IsProxyMethod())) {
return false;
}
if (quick_code == nullptr) {
return true;
}
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
if (instr->InterpretOnly()) {
return true;
}
if (runtime->GetClassLinker()->IsQuickToInterpreterBridge(quick_code)) {
// Doing this check avoids doing compiled/interpreter transitions.
return true;
}
if (Dbg::IsForcedInterpreterNeededForCalling(Thread::Current(), method)) {
// Force the use of interpreter when it is required by the debugger.
return true;
}
if (Thread::Current()->IsAsyncExceptionPending()) {
// Force use of interpreter to handle async-exceptions
return true;
}
if (runtime->IsJavaDebuggable()) {
// For simplicity, we ignore precompiled code and go to the interpreter
// assuming we don't already have jitted code.
// We could look at the oat file where `quick_code` is being defined,
// and check whether it's been compiled debuggable, but we decided to
// only rely on the JIT for debuggable apps.
jit::Jit* jit = Runtime::Current()->GetJit();
return (jit == nullptr) || !jit->GetCodeCache()->ContainsPc(quick_code);
}
if (runtime->IsNativeDebuggable()) {
DCHECK(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse());
// If we are doing native debugging, ignore application's AOT code,
// since we want to JIT it (at first use) with extra stackmaps for native
// debugging. We keep however all AOT code from the boot image,
// since the JIT-at-first-use is blocking and would result in non-negligible
// startup performance impact.
return !runtime->GetHeap()->IsInBootImageOatFile(quick_code);
}
return false;
}
art/runtime/jit/jit.cc
// True if the method's current quick-code entrypoint points into the JIT code
// cache, i.e. JIT-compiled code exists and can be invoked directly.
bool Jit::CanInvokeCompiledCode(ArtMethod* method) {
return code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode());
}
art::ShadowFrame* link_ // the next ShadowFrame below the current one on the interpreter stack, i.e. the caller's frame
// JNI-style virtual/interface invocation with jvalue arguments: devirtualize
// against the receiver, build the argument array, and invoke.
// NOTE(review): excerpt — local declarations (arg_array, result, shorty) and
// the closing brace are elided.
JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccessAlreadyRunnable& soa,
jobject obj, jmethodID mid, jvalue* args) {
ObjPtr<mirror::Object> receiver = soa.Decode<mirror::Object>(obj);
ArtMethod* method = FindVirtualMethod(receiver, jni::DecodeArtMethod(mid));
arg_array.BuildArgArrayFromJValues(soa, receiver, args);
InvokeWithArgArray(soa, method, &arg_array, &result, shorty);
return result;
// Build an accessor over this method's dex code item (registers, insns, etc.).
inline CodeItemDataAccessor ArtMethod::DexInstructionData() {
return CodeItemDataAccessor(*GetDexFile(), GetCodeItem());
}
// Locate this method's code item inside its dex file by stored offset.
inline const DexFile::CodeItem* ArtMethod::GetCodeItem() {
return GetDexFile()->GetCodeItem(GetCodeItemOffset());
}
// The dex file this method was defined in, reached through its DexCache.
inline const DexFile* ArtMethod::GetDexFile() {
// It is safe to avoid the read barrier here since the dex file is constant, so if we read the
// from-space dex file pointer it will be equal to the to-space copy.
return GetDexCache<kWithoutReadBarrier>()->GetDexFile();
}
// DexCache accessor: normally via the declaring class; obsolete methods
// (e.g. after JVMTI class redefinition) keep their own saved DexCache.
template <ReadBarrierOption kReadBarrierOption>
inline mirror::DexCache* ArtMethod::GetDexCache() {
if (LIKELY(!IsObsolete<kReadBarrierOption>())) {
mirror::Class* klass = GetDeclaringClass<kReadBarrierOption>();
return klass->GetDexCache<kDefaultVerifyFlags, kReadBarrierOption>();
} else {
DCHECK(!IsProxyMethod());
return GetObsoleteDexCache();
}
}
// Convenience wrapper using the runtime's native pointer size.
const void* GetEntryPointFromQuickCompiledCode() {
return GetEntryPointFromQuickCompiledCodePtrSize(kRuntimePointerSize);
}
// Read the quick-code entrypoint field (entry_point_from_quick_compiled_code_)
// from this ArtMethod's pointer-sized fields.
ALWAYS_INLINE const void* GetEntryPointFromQuickCompiledCodePtrSize(PointerSize pointer_size) {
return GetNativePointer<const void*>(
EntryPointFromQuickCompiledCodeOffset(pointer_size), pointer_size);
}
// Byte offset of entry_point_from_quick_compiled_code_ within ArtMethod,
// scaled for the given pointer size (the field layout depends on it).
static MemberOffset EntryPointFromQuickCompiledCodeOffset(PointerSize pointer_size) {
return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
PtrSizedFields, entry_point_from_quick_compiled_code_) / sizeof(void*)
* static_cast<size_t>(pointer_size));
}