i686: Implement patching of vDSO syscall wrapper

parent 67f8bdac
......@@ -40,6 +40,7 @@ struct syscall_desc;
struct cs_insn;
bool arch_insn_is_syscall(struct cs_insn *insn);
bool arch_insn_is_vsyscall(struct cs_insn *insn);
unsigned arch_trampoline_entry_size(void);
......
......@@ -37,7 +37,8 @@ set(SOURCES_ARCH
intercept.c)
set(SOURCES_ASM util.S
	intercept_template.S
	intercept_wrapper.S)
add_library(syscall_intercept_base_arch OBJECT ${SOURCES_ARCH})
include_directories(syscall_intercept_base_arch ${CMAKE_SOURCE_DIR}/src)
......
.global intercept_wrapper
/* intercept_wrapper
 *
 * The entry point of the fast (vDSO) syscall interception path.
 *
 * %eax - syscall number
 * %ebx - syscall argument 1
 * %ecx - syscall argument 2
 * %edx - syscall argument 3
 * %esi - syscall argument 4
 * %edi - syscall argument 5
 * %ebp - syscall argument 6
 *
 * This is invoked from a call instruction in the fast syscall path or
 * from a call instruction in the wrapper template. Therefore, we can
 * trust the return address to be on the stack.
 */
intercept_wrapper:
	push %ebp
	movl %esp, %ebp
	push %ebx
	sub $0x2c, %esp
	/* %ebx points to the base of the register save area */
	mov %esp, %ebx
	/* zero the patch_desc slot, at offset 0 */
	movl $0x0, (%ebx)
	movl %eax, 4(%ebx)
	movl %ecx, 8(%ebx)
	movl %edx, 12(%ebx)
	movl -4(%ebp), %eax
	movl %eax, 16(%ebx) /* original ebx */
	movl %ebp, %eax
	addl $4, %eax
	movl %eax, 20(%ebx) /* original esp */
	movl (%ebp), %eax
	movl %eax, 24(%ebx) /* original ebp */
	movl %esi, 28(%ebx)
	movl %edi, 32(%ebx)
	/* %eax = space for the struct returned by intercept_routine */
	mov %ebp, %eax
	sub $0x8, %eax
	/* push the context pointer, then the hidden return-value pointer */
	push %ebx
	push %eax
	call intercept_routine
	mov (%eax), %eax
	/*
	 * The callee has already popped the stacked %eax in this case,
	 * since the return value is a complex type returned in memory,
	 * so esp already points to the real first function argument.
	 * Ain't this ABI fun?
	 */
	add $4, %esp
	/*
	 * Restore registers from struct sys_ctx.
	 *
	 * Notes: esp and ebp are recovered from the stack, and eax
	 * carries the return value.
	 */
	movl 32(%esp), %edi
	movl 28(%esp), %esi
	movl 16(%esp), %ebx
	movl 12(%esp), %edx
	movl 8(%esp), %ecx
	mov %ebp, %esp
	pop %ebp
	ret
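For reference, here is a minimal C sketch approximating the struct sys_ctx
layout implied by the stores above (the field names are guesses for
illustration; only the offsets come from the code):

	/* Hypothetical mirror of the save area built by intercept_wrapper */
	struct sys_ctx_sketch {
		void *patch_desc; /* offset  0: zeroed by the wrapper */
		long eax;         /* offset  4: syscall number */
		long ecx;         /* offset  8: syscall argument 2 */
		long edx;         /* offset 12: syscall argument 3 */
		long ebx;         /* offset 16: syscall argument 1 */
		long esp;         /* offset 20: %ebp + 4 at wrapper entry */
		long ebp;         /* offset 24: syscall argument 6 */
		long esi;         /* offset 28: syscall argument 4 */
		long edi;         /* offset 32: syscall argument 5 */
	};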
......@@ -46,3 +46,12 @@ bool arch_insn_is_syscall(struct cs_insn *insn)
		return true;
	return false;
}

bool arch_insn_is_vsyscall(struct cs_insn *insn)
{
	/*
	 * A call instruction carrying a gs segment-override prefix
	 * (0x65) -- Capstone stores segment prefixes in prefix[1].
	 */
	if (insn->id == X86_INS_CALL &&
			insn->detail->x86.prefix[1] == 0x65) {
		return true;
	}
	return false;
}
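For context: on 32-bit Linux the fast syscall path enters the vDSO's
__kernel_vsyscall through a gs-relative indirect call, and 0x65 is the gs
segment-override prefix byte. A sketch of the call site this detects (the
0x10 TCB offset is illustrative):

	65 ff 15 10 00 00 00	call *%gs:0x10	/* 7 bytes -- VSYSCALL_INS_SIZE */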
.globl intercept_wrapper
.globl has_ymm_registers
.globl syscall_no_intercept
intercept_wrapper:
	.word 0x100

has_ymm_registers:
	mov $0x0, %eax
......
......@@ -43,3 +43,9 @@ unsigned arch_trampoline_entry_size()
{
	return 7;
}

bool arch_insn_is_vsyscall(struct cs_insn *insn)
{
	/* Only the i686 code patches the vDSO syscall wrapper */
	(void) insn;
	return false;
}
......@@ -226,6 +226,7 @@ intercept_disasm_next_instruction(struct intercept_disasm_context *context,
	assert(result.length != 0);

	result.is_syscall = arch_insn_is_syscall(context->insn);
	result.is_vsyscall = arch_insn_is_vsyscall(context->insn);
	result.is_call = (context->insn->id == X86_INS_CALL);
	result.is_ret = (context->insn->id == X86_INS_RET);
	result.is_rel_jump = false;
......
......@@ -53,6 +53,7 @@ struct intercept_disasm_result {
	bool is_set;
	bool is_syscall;
	bool is_vsyscall;

	/* Length in bytes, zero if disasm was not successful. */
	unsigned length;
......
......@@ -124,6 +124,8 @@ struct patch_desc {
	bool uses_nop_trampoline;
	struct range nop_trampoline;

	/* Does this patch target a vDSO fast syscall call site? */
	bool vsyscall;
};

void patch_apply(struct patch_desc *patch);
......@@ -217,8 +219,10 @@ void mprotect_asm_wrappers(void);
void activate_patches(struct intercept_desc *desc);
#define SYSCALL_INS_SIZE 2
#define VSYSCALL_INS_SIZE 7 /* an indirect call with a gs prefix */
#define JUMP_INS_SIZE 5
#define CALL_OPCODE 0xe8
#define CALL_INS_SIZE 5
#define JMP_OPCODE 0xe9
#define SHORT_JMP_OPCODE 0xeb
#define PUSH_IMM_OPCODE 0x68
......
......@@ -556,6 +556,19 @@ crawl_text(struct intercept_desc *desc)
			assert(syscall_offset >= 0);

			patch->syscall_offset = (unsigned long)syscall_offset;
			patch->vsyscall = false;
		} else if (result.is_vsyscall) {
			struct patch_desc *patch = add_new_patch(desc);

			patch->containing_lib_path = desc->path;
			patch->syscall_addr = code;

			ptrdiff_t syscall_offset = patch->syscall_addr -
					(desc->text_start - desc->text_offset);

			assert(syscall_offset >= 0);

			patch->syscall_offset = (unsigned long)syscall_offset;
			patch->vsyscall = true;
		}

		prevs[0] = prevs[1];
......
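As a quick check of the offset arithmetic above, with illustrative numbers:
if the text section is mapped at 0xf7f0d000 and starts at file offset
0x1000, a vsyscall site found at 0xf7f0d420 is recorded as

	0xf7f0d420 - (0xf7f0d000 - 0x1000) == 0x1420

i.e. the instruction's offset within the library file.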
......@@ -160,6 +160,21 @@ create_jump(unsigned char opcode, unsigned char *from, void *to)
	from[4] = d[3];
}

void
create_nop(unsigned char *from, int length)
{
	/* Prefer two-byte NOPs (0x66 0x90) over single-byte 0x90 */
	while (length > 1) {
		from[0] = 0x66;
		from[1] = 0x90;
		length -= 2;
		from += 2;
	}

	if (length)
		from[0] = 0x90;
}
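For the vsyscall patch this routine only ever has to cover the two bytes
left over after the five byte call; a minimal usage sketch:

	unsigned char pad[2];

	create_nop(pad, 2); /* pad now holds 66 90, one two-byte NOP */

An odd length would end with a single 0x90 after the two-byte NOPs.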
/*
* check_trampoline_usage -
* Make sure the trampoline table allocated at the beginning of patching has
......@@ -364,9 +379,18 @@ create_patch_wrappers(struct intercept_desc *desc)
	for (unsigned patch_i = 0; patch_i < desc->count; ++patch_i) {
		struct patch_desc *patch = desc->items + patch_i;

		if (!patch->vsyscall)
			assign_nop_trampoline(desc, patch, &next_nop_i);

		if (patch->vsyscall) {
			/*
			 * A vsyscall site is patched in place, so no
			 * neighboring instructions or NOP trampoline
			 * are needed.
			 */
			patch->uses_prev_ins = false;
			patch->uses_prev_ins_2 = false;
			patch->uses_next_ins = false;
			patch->dst_jmp_patch = patch->syscall_addr;
			patch->uses_nop_trampoline = false;
			patch->return_address =
				patch->syscall_addr + VSYSCALL_INS_SIZE;
		} else if (patch->uses_nop_trampoline) {
			/*
			 * The preferred option is to use a 5 byte relative
			 * jump in a padding space between symbols in libc.
......@@ -596,6 +620,11 @@ create_wrapper(struct patch_desc *patch)
{
	unsigned char *dst;

	if (patch->vsyscall) {
		/* All vsyscall patches share the common intercept_wrapper */
		patch->asm_wrapper = (unsigned char *)&intercept_wrapper;
		return;
	}

	if (is_asm_wrapper_space_full())
		xabort("not enough space in asm_wrapper_space");
......@@ -703,7 +732,13 @@ activate_patches(struct intercept_desc *desc)
		 * it (an overwritable NOP instruction).
		 */
		if (patch->vsyscall) {
			/* Patch the call site in place: a 5 byte call
			 * to intercept_wrapper, followed by NOP filler. */
			create_jump(CALL_OPCODE,
				patch->dst_jmp_patch, patch->asm_wrapper);
			create_nop(patch->dst_jmp_patch + CALL_INS_SIZE,
				VSYSCALL_INS_SIZE - CALL_INS_SIZE);
			continue;
		} else if (desc->uses_trampoline_table) {
			/*
			 * First jump to the trampoline table, which
			 * should be in a 2 gigabyte range. From there,
......
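Taken together, a byte-level sketch of a patched vsyscall site, assuming the
usual call *%gs:0x10 encoding (the rel32 displacement depends on where
intercept_wrapper is loaded):

	before:	65 ff 15 10 00 00 00	call *%gs:0x10
	after:	e8 xx xx xx xx		call intercept_wrapper
		66 90			NOP filler from create_nop()

intercept_wrapper returns to the NOP filler, so execution falls through to
syscall_addr + VSYSCALL_INS_SIZE, matching the return_address computed in
create_patch_wrappers().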