/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <string>
#include <cstring>
#include <cstdlib>
#include <cstdio>
#include <dlfcn.h>
#include <optional>
#include <unistd.h>
#include <errno.h>
#include <algorithm>
#include <fcntl.h>
#include "ElfLoader.h"
#include "BaseElf.h"
#include "CustomElf.h"
#include "Mappable.h"
#include "Logging.h"
#include "Utils.h"
#include <inttypes.h>
#include "mozilla/ScopeExit.h"
// From Utils.h
mozilla::Atomic<size_t, mozilla::ReleaseAcquire> gPageSize;
#if defined(ANDROID)
# include <sys/syscall.h>
# include <sys/system_properties.h>
# include <math.h>
# include <android/api-level.h>
/**
* Return the current Android SDK version (API level), or 0 on failure.
*/
static int GetAndroidSDKVersion() {
static int version = 0;
if (version) {
return version;
}
char version_string[PROP_VALUE_MAX] = {'\0'};
int len = __system_property_get("ro.build.version.sdk", version_string);
if (len) {
version = static_cast<int>(strtol(version_string, nullptr, 10));
}
return version;
}
#endif /* ANDROID */
#ifdef __ARM_EABI__
extern "C" MOZ_EXPORT const void* __gnu_Unwind_Find_exidx(void* pc, int* pcount)
__attribute__((weak));
#endif
/* Ideally we'd #include <link.h>, but that's a world of pain.
* Moreover, not all versions of Android support it, so we need a weak
* reference. */
extern "C" MOZ_EXPORT int dl_iterate_phdr(dl_phdr_cb callback, void* data)
__attribute__((weak));
/* Pointer to the PT_DYNAMIC section of the executable or library
* containing this code. */
extern "C" Elf::Dyn _DYNAMIC[];
/**
* dlfcn.h replacement functions
*/
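/* Note: the __wrap_* naming follows the GNU linker's symbol-wrapping
 * convention; the build presumably links with options along the lines of
 * -Wl,--wrap=dlopen so that dlfcn.h calls in wrapped code are routed through
 * this custom linker rather than the system one. */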
void* __wrap_dlopen(const char* path, int flags) {
#if defined(ANDROID)
if (GetAndroidSDKVersion() >= 23) {
return dlopen(path, flags);
}
#endif
RefPtr<LibHandle> handle = ElfLoader::Singleton.Load(path, flags);
if (handle) handle->AddDirectRef();
return handle;
}
const char* __wrap_dlerror(void) {
#if defined(ANDROID)
if (GetAndroidSDKVersion() >= 23) {
return dlerror();
}
#endif
const char* error = ElfLoader::Singleton.lastError.exchange(nullptr);
if (error) {
// Return a custom error if available.
return error;
}
// Or fallback to the system error.
return dlerror();
}
void* __wrap_dlsym(void* handle, const char* symbol) {
#if defined(ANDROID)
if (GetAndroidSDKVersion() >= 23) {
return dlsym(handle, symbol);
}
#endif
if (!handle) {
ElfLoader::Singleton.lastError = "dlsym(NULL, sym) unsupported";
return nullptr;
}
if (handle != RTLD_DEFAULT && handle != RTLD_NEXT) {
LibHandle* h = reinterpret_cast<LibHandle*>(handle);
return h->GetSymbolPtr(symbol);
}
ElfLoader::Singleton.lastError = nullptr; // Use system dlerror.
return dlsym(handle, symbol);
}
int __wrap_dlclose(void* handle) {
#if defined(ANDROID)
if (GetAndroidSDKVersion() >= 23) {
return dlclose(handle);
}
#endif
if (!handle) {
ElfLoader::Singleton.lastError = "No handle given to dlclose()";
return -1;
}
reinterpret_cast<LibHandle*>(handle)->ReleaseDirectRef();
return 0;
}
int __wrap_dladdr(const void* addr, Dl_info* info) {
#if defined(ANDROID)
if (GetAndroidSDKVersion() >= 23) {
return dladdr(addr, info);
}
#endif
RefPtr<LibHandle> handle =
ElfLoader::Singleton.GetHandleByPtr(const_cast<void*>(addr));
if (!handle) {
return dladdr(addr, info);
}
info->dli_fname = handle->GetPath();
info->dli_fbase = handle->GetBase();
return 1;
}
class DlIteratePhdrHelper {
public:
DlIteratePhdrHelper() {
int pipefd[2];
valid_pipe = (pipe(pipefd) == 0);
// Only keep the fds if pipe() succeeded; otherwise pipefd is uninitialized.
if (valid_pipe) {
read_fd.emplace(pipefd[0]);
write_fd.emplace(pipefd[1]);
}
}
int fill_and_call(dl_phdr_cb callback, const void* l_addr, const char* l_name,
void* data);
private:
bool valid_pipe;
std::optional<AutoCloseFD> read_fd;
std::optional<AutoCloseFD> write_fd;
};
// This function is called for each shared library iterated over by
// dl_iterate_phdr, and is used to fill a dl_phdr_info which is then
// sent through to the dl_iterate_phdr callback.
int DlIteratePhdrHelper::fill_and_call(dl_phdr_cb callback, const void* l_addr,
const char* l_name, void* data) {
dl_phdr_info info;
info.dlpi_addr = reinterpret_cast<Elf::Addr>(l_addr);
info.dlpi_name = l_name;
info.dlpi_phdr = nullptr;
info.dlpi_phnum = 0;
// Assuming l_addr points to Elf headers (in most cases, this is true),
// get the Phdr location from there.
// Unfortunately, when l_addr doesn't point to Elf headers, it may point
// to unmapped memory, or worse, unreadable memory. The only way to detect
// the latter without causing a SIGSEGV is to use the pointer in a system
// call that will try to read from there, and return an EFAULT error if
// it can't. One such system call is write(). It used to be possible to
// use a file descriptor on /dev/null for this kind of thing, but recent
// Linux kernels never return an EFAULT error when using /dev/null.
// So instead, we use a self pipe. We do however need to read() from the
// read end of the pipe as well so as to not fill up the pipe buffer and
// block on subsequent writes.
// In the unlikely event that reads from or writes to the pipe fail for some
// reason other than EFAULT, we don't try any further and just skip setting
// the Phdr location for all subsequent libraries, rather than trying to
// start over with a new pipe.
int can_read = true;
if (valid_pipe) {
int ret;
char raw_ehdr[sizeof(Elf::Ehdr)];
static_assert(sizeof(raw_ehdr) < PIPE_BUF, "PIPE_BUF is too small");
do {
// writes are atomic when smaller than PIPE_BUF, per POSIX.1-2008.
ret = write(*write_fd, l_addr, sizeof(raw_ehdr));
} while (ret == -1 && errno == EINTR);
if (ret != sizeof(raw_ehdr)) {
if (ret == -1 && errno == EFAULT) {
can_read = false;
} else {
valid_pipe = false;
}
} else {
size_t nbytes = 0;
do {
// Per POSIX.1-2008, interrupted reads can return a length smaller
// than the given one instead of failing with errno EINTR.
ret = read(*read_fd, raw_ehdr + nbytes, sizeof(raw_ehdr) - nbytes);
if (ret > 0) nbytes += ret;
} while ((nbytes != sizeof(raw_ehdr) && ret > 0) ||
(ret == -1 && errno == EINTR));
if (nbytes != sizeof(raw_ehdr)) {
valid_pipe = false;
}
}
}
if (valid_pipe && can_read) {
const Elf::Ehdr* ehdr = Elf::Ehdr::validate(l_addr);
if (ehdr) {
info.dlpi_phdr = reinterpret_cast<const Elf::Phdr*>(
reinterpret_cast<const char*>(ehdr) + ehdr->e_phoff);
info.dlpi_phnum = ehdr->e_phnum;
}
}
return callback(&info, sizeof(dl_phdr_info), data);
}
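/* For reference, the address-probing trick used above reduces to the
 * following sketch (a hypothetical IsReadable helper, error handling elided;
 * `probe` stands for a self pipe such as the one owned by
 * DlIteratePhdrHelper):
 *
 *   bool IsReadable(int probe[2], const void* addr, size_t len) {
 *     ssize_t ret;
 *     do {
 *       ret = write(probe[1], addr, len);  // fails with EFAULT if unreadable
 *     } while (ret == -1 && errno == EINTR);
 *     if (ret > 0) {
 *       char buf[PIPE_BUF];
 *       read(probe[0], buf, ret);  // drain so the pipe never fills up
 *     }
 *     return ret == static_cast<ssize_t>(len);
 *   }
 */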
int __wrap_dl_iterate_phdr(dl_phdr_cb callback, void* data) {
#if defined(ANDROID)
if (GetAndroidSDKVersion() >= 23) {
return dl_iterate_phdr(callback, data);
}
#endif
DlIteratePhdrHelper helper;
AutoLock lock(&ElfLoader::Singleton.handlesMutex);
if (dl_iterate_phdr) {
for (ElfLoader::LibHandleList::reverse_iterator it =
ElfLoader::Singleton.handles.rbegin();
it < ElfLoader::Singleton.handles.rend(); ++it) {
BaseElf* elf = (*it)->AsBaseElf();
if (!elf) {
continue;
}
int ret = helper.fill_and_call(callback, (*it)->GetBase(),
(*it)->GetPath(), data);
if (ret) return ret;
}
return dl_iterate_phdr(callback, data);
}
/* For versions of Android that don't support dl_iterate_phdr (< 5.0),
* we go through the debugger helper data, which is known to be racy, but
* there's not much we can do about this :( . */
if (!ElfLoader::Singleton.dbg) return -1;
for (ElfLoader::DebuggerHelper::iterator it =
ElfLoader::Singleton.dbg.begin();
it < ElfLoader::Singleton.dbg.end(); ++it) {
int ret = helper.fill_and_call(callback, it->l_addr, it->l_name, data);
if (ret) return ret;
}
return 0;
}
#ifdef __ARM_EABI__
const void* __wrap___gnu_Unwind_Find_exidx(void* pc, int* pcount) {
RefPtr<LibHandle> handle = ElfLoader::Singleton.GetHandleByPtr(pc);
if (handle) return handle->FindExidx(pcount);
if (__gnu_Unwind_Find_exidx) return __gnu_Unwind_Find_exidx(pc, pcount);
*pcount = 0;
return nullptr;
}
#endif
namespace {
/**
* Returns the part after the last '/' for the given path
*/
const char* LeafName(const char* path) {
const char* lastSlash = strrchr(path, '/');
if (lastSlash) return lastSlash + 1;
return path;
}
/**
* Run the given lambda while holding the internal lock of the system linker.
* To take the lock, we call the system dl_iterate_phdr and invoke the lambda
* from the callback, which is called while the lock is held. Return true on
* success.
*/
template <class Lambda>
static bool RunWithSystemLinkerLock(Lambda&& aLambda) {
if (!dl_iterate_phdr) {
// No dl_iterate_phdr support.
return false;
}
#if defined(ANDROID)
if (GetAndroidSDKVersion() < 23) {
// dl_iterate_phdr is _not_ protected by a lock on Android < 23.
// Also return false here if we failed to get the version.
return false;
}
#endif
dl_iterate_phdr(
[](dl_phdr_info*, size_t, void* lambda) -> int {
(*static_cast<Lambda*>(lambda))();
// Return 1 to stop iterating.
return 1;
},
&aLambda);
return true;
}
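/* Typical use, as in ElfLoader::Register/Forget below: mutate the debugger's
 * link map only while the system linker can't run concurrently, e.g.
 *   RunWithSystemLinkerLock([&] { dbg.Add(handle); }); */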
} /* Anonymous namespace */
/**
* LibHandle
*/
LibHandle::~LibHandle() { free(path); }
const char* LibHandle::GetName() const {
return path ? LeafName(path) : nullptr;
}
/**
* SystemElf
*/
already_AddRefed<LibHandle> SystemElf::Load(const char* path, int flags) {
/* The Android linker returns a handle when the file name matches an
* already loaded library, even when the full path doesn't exist */
if (path && path[0] == '/' && (access(path, F_OK) == -1)) {
DEBUG_LOG("dlopen(\"%s\", 0x%x) = %p", path, flags, (void*)nullptr);
ElfLoader::Singleton.lastError = "Specified file does not exist";
return nullptr;
}
ElfLoader::Singleton.lastError = nullptr; // Use system dlerror.
void* handle = dlopen(path, flags);
DEBUG_LOG("dlopen(\"%s\", 0x%x) = %p", path, flags, handle);
if (handle) {
SystemElf* elf = new SystemElf(path, handle);
ElfLoader::Singleton.Register(elf);
RefPtr<LibHandle> lib(elf);
return lib.forget();
}
return nullptr;
}
SystemElf::~SystemElf() {
if (!dlhandle) return;
DEBUG_LOG("dlclose(%p [\"%s\"])", dlhandle, GetPath());
ElfLoader::Singleton.lastError = nullptr; // Use system dlerror.
dlclose(dlhandle);
ElfLoader::Singleton.Forget(this);
}
void* SystemElf::GetSymbolPtr(const char* symbol) const {
ElfLoader::Singleton.lastError = nullptr; // Use system dlerror.
void* sym = dlsym(dlhandle, symbol);
DEBUG_LOG("dlsym(%p [\"%s\"], \"%s\") = %p", dlhandle, GetPath(), symbol,
sym);
return sym;
}
#ifdef __ARM_EABI__
const void* SystemElf::FindExidx(int* pcount) const {
/* TODO: properly implement when ElfLoader::GetHandleByPtr
does return SystemElf handles */
*pcount = 0;
return nullptr;
}
#endif
/**
* ElfLoader
*/
/* Unique ElfLoader instance */
ElfLoader ElfLoader::Singleton;
already_AddRefed<LibHandle> ElfLoader::Load(const char* path, int flags,
LibHandle* parent) {
/* Ensure logging is initialized or refresh if environment changed. */
Logging::Init();
/* Ensure self_elf initialization. */
if (!self_elf) Init();
RefPtr<LibHandle> handle;
/* Handle dlopen(nullptr) directly. */
if (!path) {
handle = SystemElf::Load(nullptr, flags);
return handle.forget();
}
/* TODO: Handle relative paths correctly */
const char* name = LeafName(path);
/* Search the list of handles we already have for a match. When the given
* path is not absolute, compare file names, otherwise compare full paths. */
if (name == path) {
AutoLock lock(&handlesMutex);
for (LibHandleList::iterator it = handles.begin(); it < handles.end(); ++it)
if ((*it)->GetName() && (strcmp((*it)->GetName(), name) == 0)) {
handle = *it;
return handle.forget();
}
} else {
AutoLock lock(&handlesMutex);
for (LibHandleList::iterator it = handles.begin(); it < handles.end(); ++it)
if ((*it)->GetPath() && (strcmp((*it)->GetPath(), path) == 0)) {
handle = *it;
return handle.forget();
}
}
char* abs_path = nullptr;
const char* requested_path = path;
/* When the path is not absolute and the library is being loaded on behalf
* of another library, first try to load it from the directory containing
* that parent library. */
if ((name == path) && parent) {
const char* parentPath = parent->GetPath();
abs_path = new char[strlen(parentPath) + strlen(path)];
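/* parent->GetPath() is expected to be an absolute path here, so it contains
 * a '/' followed by a non-empty leaf name; replacing that leaf with `path`
 * therefore fits in strlen(parentPath) + strlen(path) bytes, including the
 * terminating '\0'. */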
strcpy(abs_path, parentPath);
char* slash = strrchr(abs_path, '/');
strcpy(slash + 1, path);
path = abs_path;
}
Mappable* mappable = GetMappableFromPath(path);
/* Try loading with the custom linker if we have a Mappable */
if (mappable) handle = CustomElf::Load(mappable, path, flags);
/* Try loading with the system linker if everything above failed */
if (!handle) handle = SystemElf::Load(path, flags);
/* If we didn't have an absolute path and haven't been able to load
* a library yet, try in the system search path */
if (!handle && abs_path) handle = SystemElf::Load(name, flags);
delete[] abs_path;
DEBUG_LOG("ElfLoader::Load(\"%s\", 0x%x, %p [\"%s\"]) = %p", requested_path,
flags, reinterpret_cast<void*>(parent),
parent ? parent->GetPath() : "", static_cast<void*>(handle));
return handle.forget();
}
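/* To recap Load() above: it first returns an existing handle whose name (for
 * bare names) or full path matches, then resolves bare names relative to the
 * parent library's directory, tries the custom linker when a Mappable is
 * available for that path, falls back to the system linker, and finally
 * retries the bare leaf name through the system linker's own search path. */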
already_AddRefed<LibHandle> ElfLoader::GetHandleByPtr(void* addr) {
AutoLock lock(&handlesMutex);
/* Scan the list of handles we already have for a match */
for (LibHandleList::iterator it = handles.begin(); it < handles.end(); ++it) {
if ((*it)->Contains(addr)) {
RefPtr<LibHandle> lib = *it;
return lib.forget();
}
}
return nullptr;
}
Mappable* ElfLoader::GetMappableFromPath(const char* path) {
return Mappable::Create(path);
}
void ElfLoader::Register(LibHandle* handle) {
AutoLock lock(&handlesMutex);
handles.push_back(handle);
}
void ElfLoader::Register(CustomElf* handle) {
Register(static_cast<LibHandle*>(handle));
if (dbg) {
// We could race with the system linker when modifying the debug map, so
// only do so while holding the system linker's internal lock.
RunWithSystemLinkerLock([this, handle] { dbg.Add(handle); });
}
}
void ElfLoader::Forget(LibHandle* handle) {
/* Ensure logging is initialized or refresh if environment changed. */
Logging::Init();
AutoLock lock(&handlesMutex);
LibHandleList::iterator it =
std::find(handles.begin(), handles.end(), handle);
if (it != handles.end()) {
DEBUG_LOG("ElfLoader::Forget(%p [\"%s\"])", reinterpret_cast<void*>(handle),
handle->GetPath());
handles.erase(it);
} else {
DEBUG_LOG("ElfLoader::Forget(%p [\"%s\"]): Handle not found",
reinterpret_cast<void*>(handle), handle->GetPath());
}
}
void ElfLoader::Forget(CustomElf* handle) {
Forget(static_cast<LibHandle*>(handle));
if (dbg) {
// We could race with the system linker when modifying the debug map, so
// only do so while holding the system linker's internal lock.
RunWithSystemLinkerLock([this, handle] { dbg.Remove(handle); });
}
}
void ElfLoader::Init() {
Dl_info info;
/* On Android < 4.1, dl* functions can't be reentered. So when the library
* containing this code is dlopen()ed, it can't call dladdr from a
* static initializer. */
if (dladdr(_DYNAMIC, &info) != 0) {
self_elf = LoadedElf::Create(info.dli_fname, info.dli_fbase);
}
#if defined(ANDROID)
// On Android < 5.0, resolving weak symbols via dlsym doesn't work.
// The weak symbols Gecko uses are in either libc or libm, so we
// wrap those such that this linker does symbol resolution for them.
if (GetAndroidSDKVersion() < 21) {
if (dladdr(FunctionPtr(syscall), &info) != 0) {
libc = LoadedElf::Create(info.dli_fname, info.dli_fbase);
}
if (dladdr(FunctionPtr<int (*)(double)>(isnan), &info) != 0) {
libm = LoadedElf::Create(info.dli_fname, info.dli_fbase);
}
}
#endif
}
ElfLoader::~ElfLoader() {
LibHandleList list;
if (!Singleton.IsShutdownExpected()) {
MOZ_CRASH("Unexpected shutdown");
}
/* Release self_elf and libc */
self_elf = nullptr;
#if defined(ANDROID)
libc = nullptr;
libm = nullptr;
#endif
AutoLock lock(&handlesMutex);
/* Build up a list of all library handles with direct (external) references.
* We actually skip system library handles because we want to keep at least
* some of these open. Most notably, the Mozilla codebase keeps a few libgnome
* libraries deliberately open because of the mess that libORBit destruction
* is. dlclose()ing these libraries actually leads to problems. */
for (LibHandleList::reverse_iterator it = handles.rbegin();
it < handles.rend(); ++it) {
if ((*it)->DirectRefCount()) {
if (SystemElf* se = (*it)->AsSystemElf()) {
se->Forget();
} else {
list.push_back(*it);
}
}
}
/* Force release all external references to the handles collected above */
for (LibHandleList::iterator it = list.begin(); it < list.end(); ++it) {
while ((*it)->ReleaseDirectRef()) {
}
}
/* Remove the remaining system handles. */
if (handles.size()) {
list = handles;
for (LibHandleList::reverse_iterator it = list.rbegin(); it < list.rend();
++it) {
if ((*it)->AsSystemElf()) {
DEBUG_LOG(
"ElfLoader::~ElfLoader(): Remaining handle for \"%s\" "
"[%" PRIdPTR " direct refs, %" PRIdPTR " refs total]",
(*it)->GetPath(), (*it)->DirectRefCount(), (*it)->refCount());
} else {
DEBUG_LOG(
"ElfLoader::~ElfLoader(): Unexpected remaining handle for \"%s\" "
"[%" PRIdPTR " direct refs, %" PRIdPTR " refs total]",
(*it)->GetPath(), (*it)->DirectRefCount(), (*it)->refCount());
/* Not removing, since it could have references to other libraries,
* destroying them as a side effect, and possibly leaving dangling
* pointers in the handle list we're scanning */
}
}
}
pthread_mutex_destroy(&handlesMutex);
}
#ifdef __ARM_EABI__
int ElfLoader::__wrap_aeabi_atexit(void* that, ElfLoader::Destructor destructor,
void* dso_handle) {
Singleton.destructors.push_back(
DestructorCaller(destructor, that, dso_handle));
return 0;
}
#else
int ElfLoader::__wrap_cxa_atexit(ElfLoader::Destructor destructor, void* that,
void* dso_handle) {
Singleton.destructors.push_back(
DestructorCaller(destructor, that, dso_handle));
return 0;
}
#endif
void ElfLoader::__wrap_cxa_finalize(void* dso_handle) {
/* Call all destructors for the given DSO handle, in the reverse order they
* were registered. */
std::vector<DestructorCaller>::reverse_iterator it;
for (it = Singleton.destructors.rbegin(); it < Singleton.destructors.rend();
++it) {
if (it->IsForHandle(dso_handle)) {
it->Call();
}
}
}
void ElfLoader::DestructorCaller::Call() {
if (destructor) {
DEBUG_LOG("ElfLoader::DestructorCaller::Call(%p, %p, %p)",
FunctionPtr(destructor), object, dso_handle);
destructor(object);
destructor = nullptr;
}
}
ElfLoader::DebuggerHelper::DebuggerHelper()
: dbg(nullptr), firstAdded(nullptr) {
/* Find ELF auxiliary vectors.
*
* The kernel stores the following data on the stack when starting a
* program:
* argc
* argv[0] (pointer into argv strings defined below)
* argv[1] (likewise)
* ...
* argv[argc - 1] (likewise)
* nullptr
* envp[0] (pointer into environment strings defined below)
* envp[1] (likewise)
* ...
* envp[n] (likewise)
* nullptr
* ... (more NULLs on some platforms such as Android 4.3)
* auxv[0] (first ELF auxiliary vector)
* auxv[1] (second ELF auxiliary vector)
* ...
* auxv[p] (last ELF auxiliary vector)
* (AT_NULL, nullptr)
* padding
* argv strings, separated with '\0'
* environment strings, separated with '\0'
* nullptr
*
* What we are after are the auxv values defined by the following struct.
*/
struct AuxVector {
Elf::Addr type;
Elf::Addr value;
};
/* Pointer to the environment variables list */
extern char** environ;
/* The environment may have changed since the program started, in which
* case the environ variables list isn't the list the kernel put on the
* stack anymore. But in this new list, variables that didn't change still
* point to the strings the kernel put on the stack. It is quite unlikely
* that two modified environment variables point to two consecutive strings
* in memory, so we assume that if two consecutive environment variables
* point to two consecutive strings, we found strings the kernel put on the
* stack. */
char** env;
for (env = environ; *env; env++)
if (*env + strlen(*env) + 1 == env[1]) break;
if (!*env) return;
/* Next, we scan the stack backwards to find a pointer to one of those
* strings we found above, which will give us the location of the original
* envp list. As we are looking for pointers, we need to look at 32-bit or
* 64-bit aligned values, depending on the architecture. */
char** scan = reinterpret_cast<char**>(reinterpret_cast<uintptr_t>(*env) &
~(sizeof(void*) - 1));
while (*env != *scan) scan--;
/* Finally, scan forward to find the last environment variable pointer and
* thus the first auxiliary vector. */
while (*scan++)
;
/* Some platforms have more NULLs here, so skip them if we encounter them */
while (!*scan) scan++;
AuxVector* auxv = reinterpret_cast<AuxVector*>(scan);
/* The two values of interest in the auxiliary vectors are AT_PHDR and
* AT_PHNUM, which give us the location and size of the ELF program
* headers. */
Array<Elf::Phdr> phdrs;
char* base = nullptr;
while (auxv->type) {
if (auxv->type == AT_PHDR) {
phdrs.Init(reinterpret_cast<Elf::Phdr*>(auxv->value));
/* Assume the base address is the first byte of the same page */
base = reinterpret_cast<char*>(PageAlignedPtr(auxv->value));
}
if (auxv->type == AT_PHNUM) phdrs.Init(auxv->value);
auxv++;
}
if (!phdrs) {
DEBUG_LOG("Couldn't find program headers");
return;
}
/* In some cases, the address for the program headers we get from the
* auxiliary vectors is not mapped, because of the PT_LOAD segments
* definitions in the program executable. Trying to map anonymous memory
* with a hint giving the base address will return a different address
* if something is mapped there, and the base address otherwise. */
MappedPtr mem(MemoryRange::mmap(base, PageSize(), PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
if (mem == base) {
/* If program headers aren't mapped, try to map them */
int fd = open("/proc/self/exe", O_RDONLY);
if (fd == -1) {
DEBUG_LOG("Failed to open /proc/self/exe");
return;
}
mem.Assign(
MemoryRange::mmap(base, PageSize(), PROT_READ, MAP_PRIVATE, fd, 0));
/* If we don't manage to map at the right address, just give up. */
if (mem != base) {
DEBUG_LOG("Couldn't read program headers");
return;
}
}
/* Sanity check: the first bytes at the base address should be an ELF
* header. */
if (!Elf::Ehdr::validate(base)) {
DEBUG_LOG("Couldn't find program base");
return;
}
/* Search for the program PT_DYNAMIC segment */
Array<Elf::Dyn> dyns;
for (Array<Elf::Phdr>::iterator phdr = phdrs.begin(); phdr < phdrs.end();
++phdr) {
/* While the program headers are expected within the first mapped page of
* the program executable, the executable PT_LOADs may actually make them
* loaded at an address that is not the wanted base address of the
* library. We thus need to adjust the base address, compensating for the
* virtual address of the PT_LOAD segment corresponding to offset 0. */
if (phdr->p_type == PT_LOAD && phdr->p_offset == 0) base -= phdr->p_vaddr;
if (phdr->p_type == PT_DYNAMIC)
dyns.Init(base + phdr->p_vaddr, phdr->p_filesz);
}
if (!dyns) {
DEBUG_LOG("Failed to find PT_DYNAMIC section in program");
return;
}
/* Search for the DT_DEBUG information */
for (Array<Elf::Dyn>::iterator dyn = dyns.begin(); dyn < dyns.end(); ++dyn) {
if (dyn->d_tag == DT_DEBUG) {
dbg = reinterpret_cast<r_debug*>(dyn->d_un.d_ptr);
break;
}
}
DEBUG_LOG("DT_DEBUG points at %p", static_cast<void*>(dbg));
}
/**
* Helper class to ensure the given pointer is writable within the scope of
* an instance. Permissions to the memory page where the pointer lies are
* restored to their original value when the instance is destroyed.
*/
class EnsureWritable {
public:
template <typename T>
explicit EnsureWritable(T* ptr, size_t length_ = sizeof(T)) {
MOZ_ASSERT(length_ < PageSize());
prot = -1;
page = MAP_FAILED;
char* firstPage = PageAlignedPtr(reinterpret_cast<char*>(ptr));
char* lastPageEnd =
PageAlignedEndPtr(reinterpret_cast<char*>(ptr) + length_);
length = lastPageEnd - firstPage;
uintptr_t start = reinterpret_cast<uintptr_t>(firstPage);
uintptr_t end;
prot = getProt(start, &end);
if (prot == -1 || (start + length) > end) MOZ_CRASH();
if (prot & PROT_WRITE) {
success = true;
return;
}
page = firstPage;
int ret = mprotect(page, length, prot | PROT_WRITE);
success = ret == 0;
if (!success) {
ERROR("mprotect(%p, %zu, %d) = %d (errno=%d; %s)", page, length,
prot | PROT_WRITE, ret, errno, strerror(errno));
}
}
bool IsWritable() const { return success; }
~EnsureWritable() {
if (success && page != MAP_FAILED) {
mprotect(page, length, prot);
}
}
private:
int getProt(uintptr_t addr, uintptr_t* end) {
/* The interesting part of the /proc/self/maps format looks like:
* startAddr-endAddr rwxp */
int result = 0;
FILE* const f = fopen("/proc/self/maps", "r");
const auto cleanup = mozilla::MakeScopeExit([&]() {
if (f) fclose(f);
});
while (f) {
unsigned long long startAddr, endAddr;
char perms[5];
if (fscanf(f, "%llx-%llx %4s %*1024[^\n] ", &startAddr, &endAddr,
perms) != 3)
return -1;
if (addr < startAddr || addr >= endAddr) continue;
if (perms[0] == 'r')
result |= PROT_READ;
else if (perms[0] != '-')
return -1;
if (perms[1] == 'w')
result |= PROT_WRITE;
else if (perms[1] != '-')
return -1;
if (perms[2] == 'x')
result |= PROT_EXEC;
else if (perms[2] != '-')
return -1;
*end = endAddr;
return result;
}
return -1;
}
int prot;
void* page;
size_t length;
bool success;
};
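/* Typical use, as in DebuggerHelper::Add/Remove below:
 *
 *   EnsureWritable w(&dbg->r_map->l_prev);
 *   if (w.IsWritable()) dbg->r_map->l_prev = map;
 *   // Original page protections are restored when `w` goes out of scope.
 */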
/**
* The system linker maintains a doubly linked list of the libraries it loads
* for use by the debugger. Unfortunately, it also uses the list pointers
* in a lot of operations, and adding our data to the list is likely to
* trigger crashes when the linker tries to use data we don't provide or that
* falls outside the amount of data we allocated. Fortunately, the linker
* only traverses the list forward and accesses the head of the list from a
* private pointer instead of using the value in the r_debug structure.
* This means we can safely add members at the beginning of the list.
* Unfortunately, gdb checks the coherency of l_prev values, so we have
* to adjust the l_prev value for the first element the system linker
* knows about. Fortunately, it doesn't use l_prev, and the first element
* is not ever going to be released before our elements, since it is the
* program executable, so the system linker should not be changing
* r_debug::r_map.
*/
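/* Roughly, after our libraries are prepended, the map list looks like:
 *
 *   r_map -> [our lib, last added] <-> ... <-> [firstAdded] <->
 *            [executable] <-> [system libs...]
 *
 * where only the l_prev field of the original head (the executable's entry,
 * owned by the system linker) ever needs to be patched, hence the
 * EnsureWritable dance below. */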
void ElfLoader::DebuggerHelper::Add(ElfLoader::link_map* map) {
if (!dbg->r_brk) return;
dbg->r_state = r_debug::RT_ADD;
dbg->r_brk();
if (!firstAdded) {
/* When adding a library for the first time, r_map points to data
* handled by the system linker, and that data may be read-only */
EnsureWritable w(&dbg->r_map->l_prev);
if (!w.IsWritable()) {
dbg->r_state = r_debug::RT_CONSISTENT;
dbg->r_brk();
return;
}
firstAdded = map;
dbg->r_map->l_prev = map;
} else
dbg->r_map->l_prev = map;
map->l_prev = nullptr;
map->l_next = dbg->r_map;
dbg->r_map = map;
dbg->r_state = r_debug::RT_CONSISTENT;
dbg->r_brk();
}
void ElfLoader::DebuggerHelper::Remove(ElfLoader::link_map* map) {
if (!dbg->r_brk) return;
dbg->r_state = r_debug::RT_DELETE;
dbg->r_brk();
if (map == firstAdded) {
/* When removing the first added library, its l_next is going to be
* data handled by the system linker, and that data may be read-only */
EnsureWritable w(&map->l_next->l_prev);
if (!w.IsWritable()) {
dbg->r_state = r_debug::RT_CONSISTENT;
dbg->r_brk();
return;
}
firstAdded = map->l_prev;
map->l_next->l_prev = map->l_prev;
} else if (map->l_next) {
map->l_next->l_prev = map->l_prev;
}
if (dbg->r_map == map)
dbg->r_map = map->l_next;
else if (map->l_prev) {
map->l_prev->l_next = map->l_next;
}
dbg->r_state = r_debug::RT_CONSISTENT;
dbg->r_brk();
}
#if defined(ANDROID) && defined(__NR_sigaction)
/* As some system libraries may be calling signal() or sigaction() to
* set a SIGSEGV handler, effectively breaking MappableSeekableZStream,
* or worse, restoring our SIGSEGV handler with the wrong flags (which using
* signal() will do), we want to hook into the system's sigaction() to
* replace it with our own wrapper instead, so that our handler is never
* replaced. We used to only do that with libraries this linker loads,
* but it turns out at least one system library does call signal() and
* breaks us (libsc-a3xx.so on the Samsung Galaxy S4).
* As libc's signal (bsd_signal/sysv_signal, really) calls sigaction
* under the hood, instead of calling the signal system call directly,
* we only need to hook sigaction. This is true for both bionic and
* glibc.
*/
/* libc's sigaction */
extern "C" int sigaction(int signum, const struct sigaction* act,
struct sigaction* oldact);
/* Simple reimplementation of sigaction. This is roughly equivalent
* to the assembly that comes in bionic, but not quite equivalent to
* glibc's implementation, so we only use this on Android. */
int sys_sigaction(int signum, const struct sigaction* act,
struct sigaction* oldact) {
return syscall(__NR_sigaction, signum, act, oldact);
}
/* Replace the first instructions of the given function with a jump
* to the given new function. */
template <typename T>
static bool Divert(T func, T new_func) {
void* ptr = FunctionPtr(func);
uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
# if defined(__i386__)
// A 32-bit jump is a 5 bytes instruction.
EnsureWritable w(ptr, 5);
*reinterpret_cast<unsigned char*>(addr) = 0xe9; // jmp
*reinterpret_cast<intptr_t*>(addr + 1) =
reinterpret_cast<uintptr_t>(new_func) - addr - 5; // target displacement
return true;
# elif defined(__arm__) || defined(__aarch64__)
const unsigned char trampoline[] = {
# ifdef __arm__
// .thumb
0x46, 0x04, // nop
0x78, 0x47, // bx pc
0x46, 0x04, // nop
// .arm
0x04, 0xf0, 0x1f, 0xe5, // ldr pc, [pc, #-4]
// .word <new_func>
# else // __aarch64__
0x50, 0x00,
0x00, 0x58, // ldr x16, [pc, #8] ; x16 (aka ip0) is the first
0x00, 0x02,
0x1f, 0xd6, // br x16 ; intra-procedure-call
// .word <new_func.lo> ; scratch register.
// .word <new_func.hi>
# endif
};
const unsigned char* start;
# ifdef __arm__
if (addr & 0x01) {
/* Function is thumb, the actual address of the code is without the
* least significant bit. */
addr--;
/* The arm part of the trampoline needs to be 32-bit aligned */
if (addr & 0x02)
start = trampoline;
else
start = trampoline + 2;
} else {
/* Function is arm, we only need the arm part of the trampoline */
start = trampoline + 6;
}
# else // __aarch64__
start = trampoline;
# endif
size_t len = sizeof(trampoline) - (start - trampoline);
EnsureWritable w(reinterpret_cast<void*>(addr), len + sizeof(void*));
memcpy(reinterpret_cast<void*>(addr), start, len);
*reinterpret_cast<void**>(addr + len) = FunctionPtr(new_func);
__builtin___clear_cache(reinterpret_cast<char*>(addr),
reinterpret_cast<char*>(addr + len + sizeof(void*)));
return true;
# else
return false;
# endif
}
#else
# define sys_sigaction sigaction
template <typename T>
static bool Divert(T func, T new_func) {
return false;
}
#endif
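/* In this file, Divert() is only used by SEGVHandler::FinishInitialization()
 * below, to redirect libc's sigaction (or bionic's __sigaction) to
 * __wrap_sigaction so that our SIGSEGV handler can't be displaced. */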
namespace {
/* Clock that only accounts for time spent in the current process. */
static uint64_t ProcessTimeStamp_Now() {
struct timespec ts;
int rv = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
if (rv != 0) {
return 0;
}
uint64_t baseNs = (uint64_t)ts.tv_sec * 1000000000;
return baseNs + (uint64_t)ts.tv_nsec;
}
} // namespace
/* Data structure used to pass data to the temporary signal handler,
* as well as triggering a test crash. */
struct TmpData {
volatile int crash_int;
volatile uint64_t crash_timestamp;
};
SEGVHandler::SEGVHandler()
: initialized(false), registeredHandler(false), signalHandlingSlow(true) {
/* Ensure logging is initialized before the DEBUG_LOG in the test_handler.
* As this constructor runs before the ElfLoader constructor (by effect
* of ElfLoader inheriting from this class), this also initializes on behalf
* of ElfLoader and DebuggerHelper. */
Logging::Init();
/* Initialize oldStack.ss_flags to an invalid value when used to set
* an alternative stack, meaning we haven't got information about the
* original alternative stack and thus don't mean to restore it in
* the destructor. */
oldStack.ss_flags = SS_ONSTACK;
/* Get the current segfault signal handler. */
struct sigaction old_action;
sys_sigaction(SIGSEGV, nullptr, &old_action);
/* Some devices have a kernel option enabled that makes SIGSEGV handler
* have an overhead so high that it affects how on-demand decompression
* performs. The handler will set signalHandlingSlow if the triggered
* SIGSEGV took too much time. */
struct sigaction action;
action.sa_sigaction = &SEGVHandler::test_handler;
sigemptyset(&action.sa_mask);
action.sa_flags = SA_SIGINFO | SA_NODEFER;
action.sa_restorer = nullptr;
stackPtr.Assign(MemoryRange::mmap(nullptr, PageSize(), PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
if (stackPtr.get() == MAP_FAILED) return;
if (sys_sigaction(SIGSEGV, &action, nullptr)) return;
TmpData* data = reinterpret_cast<TmpData*>(stackPtr.get());
data->crash_timestamp = ProcessTimeStamp_Now();
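/* Making the page inaccessible and then writing to it deliberately triggers
 * the SIGSEGV that test_handler() times against crash_timestamp. */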
mprotect(stackPtr, stackPtr.GetLength(), PROT_NONE);
data->crash_int = 123;
/* Restore the original segfault signal handler. */
sys_sigaction(SIGSEGV, &old_action, nullptr);
stackPtr.Assign(MAP_FAILED, 0);
}
void SEGVHandler::FinishInitialization() {
/* Ideally, we'd need some locking here, but in practice, we're not
* going to race with another thread. */
initialized = true;
if (signalHandlingSlow) {
return;
}
typedef int (*sigaction_func)(int, const struct sigaction*,
struct sigaction*);
sigaction_func libc_sigaction;
#if defined(ANDROID)
/* Android > 4.4 comes with a sigaction wrapper in a LD_PRELOADed library
* (libsigchain) for ART. That wrapper kind of does the same trick as we
* do, so we need extra care in handling it.
* - Divert the libc's sigaction, assuming the LD_PRELOADed library uses
* it under the hood (which is more or less true according to the source
* of that library, since it's doing a lookup in RTLD_NEXT)
* - With the LD_PRELOADed library in place, all calls to sigaction from
* system libraries will go to the LD_PRELOADed library.
* - The LD_PRELOADed library calls to sigaction go to our __wrap_sigaction.
* - The calls to sigaction from libraries faulty.lib loads are sent to
* the LD_PRELOADed library.
* In practice, for signal handling, this means:
* - The signal handler registered to the kernel is ours.
* - Our handler redispatches to the LD_PRELOADed library's if there's a
* segfault we don't handle.
* - The LD_PRELOADed library redispatches according to whatever system
* library or faulty.lib-loaded library set with sigaction.
*
* When there is no sigaction wrapper in place:
* - Divert the libc's sigaction.
* - Calls to sigaction from system library and faulty.lib-loaded libraries
* all go to the libc's sigaction, which end up in our __wrap_sigaction.
* - The signal handler registered to the kernel is ours.
* - Our handler redispatches according to whatever system library or
* faulty.lib-loaded library set with sigaction.
*/
void* libc = dlopen("libc.so", RTLD_GLOBAL | RTLD_LAZY);
if (libc) {
/*
* Lollipop bionic only has a small trampoline in sigaction, with the real
* work happening in __sigaction. Divert there instead of sigaction if it
* exists. Bug 1154803
*/
libc_sigaction =
reinterpret_cast<sigaction_func>(dlsym(libc, "__sigaction"));
if (!libc_sigaction) {
libc_sigaction =
reinterpret_cast<sigaction_func>(dlsym(libc, "sigaction"));
}
} else
#endif
{
libc_sigaction = sigaction;
}
if (!Divert(libc_sigaction, __wrap_sigaction)) return;
/* Set up an alternative stack if the already existing one is not big
* enough, or if there is none. */
if (sigaltstack(nullptr, &oldStack) == 0) {
if (oldStack.ss_flags == SS_ONSTACK) oldStack.ss_flags = 0;
if (!oldStack.ss_sp || oldStack.ss_size < stackSize) {
stackPtr.Assign(MemoryRange::mmap(nullptr, stackSize,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
if (stackPtr.get() == MAP_FAILED) return;
stack_t stack;
stack.ss_sp = stackPtr;
stack.ss_size = stackSize;
stack.ss_flags = 0;
if (sigaltstack(&stack, nullptr) != 0) return;
}
}
/* Register our own handler, and store the already registered one in
* SEGVHandler's struct sigaction member */
action.sa_sigaction = &SEGVHandler::handler;
action.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
registeredHandler = !sys_sigaction(SIGSEGV, &action, &this->action);
}
SEGVHandler::~SEGVHandler() {
/* Restore alternative stack for signals */
if (oldStack.ss_flags != SS_ONSTACK) sigaltstack(&oldStack, nullptr);
/* Restore original signal handler */
if (registeredHandler) sys_sigaction(SIGSEGV, &this->action, nullptr);
}
/* Test handler for a deliberately triggered SIGSEGV that determines whether
* the segfault handler is called quickly enough. */
void SEGVHandler::test_handler(int signum, siginfo_t* info, void* context) {
SEGVHandler& that = ElfLoader::Singleton;
mprotect(that.stackPtr, that.stackPtr.GetLength(), PROT_READ | PROT_WRITE);
TmpData* data = reinterpret_cast<TmpData*>(that.stackPtr.get());
uint64_t latency = ProcessTimeStamp_Now() - data->crash_timestamp;
DEBUG_LOG("SEGVHandler latency: %" PRIu64, latency);
/* See bug 886736 for timings on different devices; 150 µs is reasonably above
* the latency on "working" devices and seems to be short enough not to impose
* a huge overhead on on-demand decompression. */
if (latency <= 150000) that.signalHandlingSlow = false;
}
/* TODO: "properly" handle signal masks and flags */
void SEGVHandler::handler(int signum, siginfo_t* info, void* context) {
// ASSERT(signum == SIGSEGV);
DEBUG_LOG("Caught segmentation fault @%p", info->si_addr);
/* Redispatch to the registered handler */
SEGVHandler& that = ElfLoader::Singleton;
if (that.action.sa_flags & SA_SIGINFO) {
DEBUG_LOG("Redispatching to registered handler @%p",
FunctionPtr(that.action.sa_sigaction));
that.action.sa_sigaction(signum, info, context);
} else if (that.action.sa_handler == SIG_DFL) {
DEBUG_LOG("Redispatching to default handler");
/* Reset the handler to the default one, and trigger it. */
sys_sigaction(signum, &that.action, nullptr);
raise(signum);
} else if (that.action.sa_handler != SIG_IGN) {
DEBUG_LOG("Redispatching to registered handler @%p",
FunctionPtr(that.action.sa_handler));
that.action.sa_handler(signum);
} else {
DEBUG_LOG("Ignoring");
}
}
int SEGVHandler::__wrap_sigaction(int signum, const struct sigaction* act,
struct sigaction* oldact) {
SEGVHandler& that = ElfLoader::Singleton;
/* Use system sigaction() function for all but SIGSEGV signals. */
if (!that.registeredHandler || (signum != SIGSEGV))
return sys_sigaction(signum, act, oldact);
if (oldact) *oldact = that.action;
if (act) that.action = *act;
return 0;
}