* Block operations.
*/
-JIT_OP_MEMCPY: manual
-	[] -> {
-		unsigned char *inst;
-		int reg, reg2, reg3;
-		int regi, save_reg3;
+JIT_OP_MEMCPY: ternary
+/* Size is a compile-time constant <= 0: nothing to copy, emit no code. */
+	[any, any, imm, if("$3 <= 0")] -> { }
+/* Small constant size (<= 32 bytes): inline the copy, bouncing each
+   chunk through the scratch register $4. */
+	[reg, reg, imm, if("$3 <= 32"),
+		scratch("eax" | "ecx" | "edx" | "ebx"), space("32 + $3 * 4")] -> {
		int disp;
-
-		if(insn->value2->is_constant && insn->value2->address <= 0)
+		/* Copy 4-byte chunks: load from source $2, store to dest $1. */
+		disp = 0;
+		while($3 >= (disp + 4))
		{
+			x86_mov_reg_membase(inst, $4, $2, disp, 4);
+			x86_mov_membase_reg(inst, $1, disp, $4, 4);
+			disp += 4;
		}
-		else if(insn->value2->is_constant && insn->value2->address <= 32)
+		/* Trailing 2-byte chunk, if any. */
+		if($3 >= (disp + 2))
		{
-			reg = _jit_regs_load_value
-				(gen, insn->dest, 0,
-				 (insn->flags & (JIT_INSN_DEST_NEXT_USE | JIT_INSN_DEST_LIVE)));
-			reg2 = _jit_regs_load_value
-				(gen, insn->value1, 0,
-				 (insn->flags & (JIT_INSN_VALUE1_NEXT_USE | JIT_INSN_VALUE1_LIVE)));
-
-			reg3 = -1;
-			save_reg3 = 0;
-			for(regi = 0; regi < 4; regi++)
-			{
-				if(regi != reg && regi != reg2)
-				{
-					if(gen->contents[regi].num_values == 0 &&
-					   !gen->contents[regi].used_for_temp &&
-					   !gen->contents[reg].is_long_end)
-					{
-						reg3 = regi;
-						break;
-					}
-					if(reg3 == -1)
-					{
-						reg3 = regi;
-					}
-				}
-			}
-			if(gen->contents[reg3].num_values > 0 ||
-			   gen->contents[regi].used_for_temp ||
-			   gen->contents[reg].is_long_end)
-			{
-				save_reg3 = 1;
-			}
-
-			inst = gen->posn.ptr;
-			if(!jit_cache_check_for_n(&(gen->posn), 256))
-			{
-				jit_cache_mark_full(&(gen->posn));
-				return;
-			}
-
-			reg = _jit_reg_info[reg].cpu_reg;
-			reg2 = _jit_reg_info[reg2].cpu_reg;
-			reg3 = _jit_reg_info[reg3].cpu_reg;
-
-			if(save_reg3)
-			{
-				x86_push_reg(inst, reg3);
-			}
-
-			disp = 0;
-			while(insn->value2->address >= (disp + 4))
-			{
-				x86_mov_reg_membase(inst, reg3, reg2, disp, 4);
-				x86_mov_membase_reg(inst, reg, disp, reg3, 4);
-				disp += 4;
-			}
-			if(insn->value2->address >= (disp + 2))
-			{
-				x86_mov_reg_membase(inst, reg3, reg2, disp, 2);
-				x86_mov_membase_reg(inst, reg, disp, reg3, 2);
-				disp += 2;
-			}
-			if(insn->value2->address > disp)
-			{
-				x86_mov_reg_membase(inst, reg3, reg2, disp, 1);
-				x86_mov_membase_reg(inst, reg, disp, reg3, 1);
-			}
-
-			if(save_reg3)
-			{
-				x86_pop_reg(inst, reg3);
-			}
-
-			gen->posn.ptr = inst;
+			x86_mov_reg_membase(inst, $4, $2, disp, 2);
+			x86_mov_membase_reg(inst, $1, disp, $4, 2);
+			disp += 2;
		}
-		else
+		/* Final odd byte, if any. */
+		if($3 > disp)
		{
-			reg = _jit_regs_load_value
-				(gen, insn->dest, 0,
-				 (insn->flags & (JIT_INSN_DEST_NEXT_USE | JIT_INSN_DEST_LIVE)));
-			reg2 = _jit_regs_load_value
-				(gen, insn->value1, 0,
-				 (insn->flags & (JIT_INSN_VALUE1_NEXT_USE | JIT_INSN_VALUE1_LIVE)));
-			reg3 = _jit_regs_load_value
-				(gen, insn->value2, 0,
-				 (insn->flags & (JIT_INSN_VALUE2_NEXT_USE | JIT_INSN_VALUE2_LIVE)));
-
-			/* A function call may destroy EAX,EBX,ECX,EDX registers. */
-			/* TODO: do not spill ESI and EDI. */
-			_jit_regs_spill_all(gen);
-
-			inst = gen->posn.ptr;
-			if(!jit_cache_check_for_n(&(gen->posn), 32))
-			{
-				jit_cache_mark_full(&(gen->posn));
-				return;
-			}
-
-			x86_push_reg(inst, _jit_reg_info[reg3].cpu_reg);
-			x86_push_reg(inst, _jit_reg_info[reg2].cpu_reg);
-			x86_push_reg(inst, _jit_reg_info[reg].cpu_reg);
-			x86_call_code(inst, jit_memcpy);
-			x86_alu_reg_imm(inst, X86_ADD, X86_ESP, 3 * sizeof(void *));
-
-			gen->posn.ptr = inst;
+			x86_mov_reg_membase(inst, $4, $2, disp, 1);
+			x86_mov_membase_reg(inst, $1, disp, $4, 1);
		}
	}
+/* General case: push args right-to-left, call jit_memcpy(dest, src, size),
+   then pop the three arguments.  The call may destroy EAX/ECX/EDX/EBX,
+   hence the clobber list. */
+	[reg, reg, reg, clobber("eax", "ecx", "edx", "ebx")] -> {
+		x86_push_reg(inst, $3);
+		x86_push_reg(inst, $2);
+		x86_push_reg(inst, $1);
+		x86_call_code(inst, jit_memcpy);
+		x86_alu_reg_imm(inst, X86_ADD, X86_ESP, 3 * sizeof(void *));
+	}
-JIT_OP_MEMMOVE: manual
-	[] -> {
-		unsigned char *inst;
-		int reg, reg2, reg3;
-
-		reg = _jit_regs_load_value
-			(gen, insn->dest, 0,
-			 (insn->flags & (JIT_INSN_DEST_NEXT_USE | JIT_INSN_DEST_LIVE)));
-		reg2 = _jit_regs_load_value
-			(gen, insn->value1, 0,
-			 (insn->flags & (JIT_INSN_VALUE1_NEXT_USE | JIT_INSN_VALUE1_LIVE)));
-		reg3 = _jit_regs_load_value
-			(gen, insn->value2, 0,
-			 (insn->flags & (JIT_INSN_VALUE2_NEXT_USE | JIT_INSN_VALUE2_LIVE)));
-
-		/* A function call may destroy EAX,EBX,ECX,EDX registers. */
-		/* TODO: do not spill ESI and EDI. */
-		_jit_regs_spill_all(gen);
-
-		inst = gen->posn.ptr;
-		if(!jit_cache_check_for_n(&(gen->posn), 32))
-		{
-			jit_cache_mark_full(&(gen->posn));
-			return;
-		}
-
-		x86_push_reg(inst, _jit_reg_info[reg3].cpu_reg);
-		x86_push_reg(inst, _jit_reg_info[reg2].cpu_reg);
-		x86_push_reg(inst, _jit_reg_info[reg].cpu_reg);
+JIT_OP_MEMMOVE: ternary
+/* Size is a compile-time constant <= 0: nothing to move, emit no code. */
+	[any, any, imm, if("$3 <= 0")] -> { }
+/* Always call jit_memmove(dest, src, size) — no inline fast path, since
+   the regions may overlap.  Args pushed right-to-left, popped after the
+   call; the call may destroy EAX/ECX/EDX/EBX, hence the clobber list. */
+	[reg, reg, reg, clobber("eax", "ecx", "edx", "ebx")] -> {
+		x86_push_reg(inst, $3);
+		x86_push_reg(inst, $2);
+		x86_push_reg(inst, $1);
		x86_call_code(inst, jit_memmove);
		x86_alu_reg_imm(inst, X86_ADD, X86_ESP, 3 * sizeof(void *));
-
-		gen->posn.ptr = inst;
	}
-JIT_OP_MEMSET: manual
-	[] -> {
-		unsigned char *inst;
-		int reg, reg2, reg3;
-		int regi, save_reg3;
+JIT_OP_MEMSET: ternary
+/* Size is a compile-time constant <= 0: nothing to set, emit no code. */
+	[any, any, imm, if("$3 <= 0")] -> { }
+/* Constant value and small constant size (<= 32): store immediates,
+   replicating the byte value into 4-/2-byte patterns.
+   NOTE(review): assumes $2 already fits in a byte — confirm that the
+   front end truncates the memset value before this point. */
+	[reg, imm, imm, if("$3 <= 32"), space("32 + $3 * 4")] -> {
		int disp;
-
-		if(insn->value2->is_constant && insn->value2->address <= 0)
+		disp = 0;
+		while($3 >= (disp + 4))
		{
+			x86_mov_membase_imm(inst, $1, disp, $2 * 0x01010101, 4);
+			disp += 4;
		}
-		else if(insn->value2->is_constant && insn->value2->address <= 32)
+		/* Trailing 2-byte chunk, if any. */
+		if($3 >= (disp + 2))
		{
-			reg = _jit_regs_load_value
-				(gen, insn->dest, 0,
-				 (insn->flags & (JIT_INSN_DEST_NEXT_USE | JIT_INSN_DEST_LIVE)));
-
-			reg2 = -1;
-			reg3 = -1;
-			save_reg3 = 0;
-
-			if(insn->value1->is_constant)
-			{
-				inst = gen->posn.ptr;
-				if(!jit_cache_check_for_n(&(gen->posn), 256))
-				{
-					jit_cache_mark_full(&(gen->posn));
-					return;
-				}
-				reg = _jit_reg_info[reg].cpu_reg;
-			}
-			else
-			{
-				reg2 = _jit_regs_load_value
-					(gen, insn->value1, insn->value2->address >= 4,
-					 (insn->flags & (JIT_INSN_VALUE1_NEXT_USE | JIT_INSN_VALUE1_LIVE)));
-
-				if(insn->value2->address >= 2 || !X86_IS_BYTE_REG(reg2))
-				{
-					reg3 = -1;
-					for(regi = 0; regi < 4; regi++)
-					{
-						if(regi != reg && regi != reg2)
-						{
-							if(gen->contents[regi].num_values == 0 &&
-							   !gen->contents[regi].used_for_temp &&
-							   !gen->contents[reg].is_long_end)
-							{
-								reg3 = regi;
-								break;
-							}
-							if(reg3 == -1)
-							{
-								reg3 = regi;
-							}
-						}
-					}
-					if(gen->contents[reg3].num_values > 0 ||
-					   gen->contents[regi].used_for_temp ||
-					   gen->contents[reg].is_long_end)
-					{
-						save_reg3 = 1;
-					}
-				}
-
-				inst = gen->posn.ptr;
-				if(!jit_cache_check_for_n(&(gen->posn), 256))
-				{
-					jit_cache_mark_full(&(gen->posn));
-					return;
-				}
-
-				reg = _jit_reg_info[reg].cpu_reg;
-				reg2 = _jit_reg_info[reg2].cpu_reg;
-
-				if(insn->value2->address >= 2 || !X86_IS_BYTE_REG(reg2))
-				{
-					reg3 = _jit_reg_info[reg3].cpu_reg;
-
-					if(save_reg3)
-					{
-						x86_push_reg(inst, reg3);
-					}
-
-					x86_mov_reg_reg(inst, reg3, reg2, 4);
-					if(insn->value2->address >= 2)
-					{
-						x86_shift_reg_imm(inst, X86_SHL, reg3, 8);
-						x86_alu_reg_reg(inst, X86_OR, reg3, reg2);
-						if(insn->value2->address >= 4)
-						{
-							x86_mov_reg_reg(inst, reg2, reg3, 4);
-							x86_shift_reg_imm(inst, X86_SHL, reg3, 16);
-							x86_alu_reg_reg(inst, X86_OR, reg3, reg2);
-						}
-					}
-				}
-			}
-
-			disp = 0;
-			while(insn->value2->address >= (disp + 4))
-			{
-				if(insn->value1->is_constant)
-				{
-					x86_mov_membase_imm
-						(inst, reg, disp,
-						 insn->value1->address * 0x01010101, 4);
-				}
-				else
-				{
-					x86_mov_membase_reg(inst, reg, disp, reg3, 4);
-				}
-				disp += 4;
-			}
-			if(insn->value2->address >= (disp + 2))
-			{
-				if(insn->value1->is_constant)
-				{
-					x86_mov_membase_imm
-						(inst, reg, disp,
-						 insn->value1->address * 0x0101, 2);
-				}
-				else
-				{
-					x86_mov_membase_reg(inst, reg, disp, reg3, 2);
-				}
-				disp += 2;
-			}
-			if(insn->value2->address > disp)
-			{
-				if(insn->value1->is_constant)
-				{
-					x86_mov_membase_imm
-						(inst, reg, disp,
-						 insn->value1->address, 1);
-				}
-				else if(insn->value2->address >= 2 || !X86_IS_BYTE_REG(reg2))
-				{
-					x86_mov_membase_reg(inst, reg, disp, reg3, 1);
-				}
-				else
-				{
-					x86_mov_membase_reg(inst, reg, disp, reg2, 1);
-				}
-			}
-
-			if(save_reg3)
-			{
-				x86_pop_reg(inst, reg3);
-			}
-
-			gen->posn.ptr = inst;
+			x86_mov_membase_imm(inst, $1, disp, $2 * 0x0101, 2);
+			disp += 2;
		}
-		else
+		/* Final odd byte, if any.  Use $3 like the sibling conditions
+		   above — not a raw insn->value2->address reference. */
+		if($3 > disp)
		{
-			reg = _jit_regs_load_value
-				(gen, insn->dest, 0,
-				 (insn->flags & (JIT_INSN_DEST_NEXT_USE | JIT_INSN_DEST_LIVE)));
-			reg2 = _jit_regs_load_value
-				(gen, insn->value1, 0,
-				 (insn->flags & (JIT_INSN_VALUE1_NEXT_USE | JIT_INSN_VALUE1_LIVE)));
-			reg3 = _jit_regs_load_value
-				(gen, insn->value2, 0,
-				 (insn->flags & (JIT_INSN_VALUE2_NEXT_USE | JIT_INSN_VALUE2_LIVE)));
-
-			/* A function call may destroy EAX,EBX,ECX,EDX registers. */
-			/* TODO: do not spill ESI and EDI. */
-			_jit_regs_spill_all(gen);
-
-			inst = gen->posn.ptr;
-			if(!jit_cache_check_for_n(&(gen->posn), 32))
+			x86_mov_membase_imm(inst, $1, disp, $2, 1);
+		}
+	}
+/* Tiny size (< 4) with the value in a byte-addressable register:
+   store it byte by byte. */
+	[reg, reg("eax"|"ecx"|"edx"|"ebx"), imm, if("$3 < 4")] -> {
+		x86_mov_membase_reg(inst, $1, 0, $2, 1);
+		if($3 > 1)
+		{
+			x86_mov_membase_reg(inst, $1, 1, $2, 1);
+			if($3 > 2)
			{
-				jit_cache_mark_full(&(gen->posn));
-				return;
+				x86_mov_membase_reg(inst, $1, 2, $2, 1);
			}
-
-			x86_push_reg(inst, _jit_reg_info[reg3].cpu_reg);
-			x86_push_reg(inst, _jit_reg_info[reg2].cpu_reg);
-			x86_push_reg(inst, _jit_reg_info[reg].cpu_reg);
-			x86_call_code(inst, jit_memset);
-			x86_alu_reg_imm(inst, X86_ADD, X86_ESP, 3 * sizeof(void *));
-
-			gen->posn.ptr = inst;
		}
	}
+/* Even size <= 32: replicate the value byte across a full word in place
+   (destroying $2, hence "+reg"), then store 4-byte chunks and at most
+   one trailing 2-byte chunk. */
+	[reg, +reg, imm, scratch("?"),
+		if("$3 <= 32 && ($3 % 2) == 0"), space("32 + $3 * 4")] -> {
+		int disp;
+		x86_mov_reg_reg(inst, $4, $2, 4);
+		x86_shift_reg_imm(inst, X86_SHL, $2, 8);
+		x86_alu_reg_reg(inst, X86_OR, $2, $4);
+		x86_mov_reg_reg(inst, $4, $2, 4);
+		x86_shift_reg_imm(inst, X86_SHL, $2, 16);
+		x86_alu_reg_reg(inst, X86_OR, $2, $4);
+		disp = 0;
+		while($3 >= (disp + 4))
+		{
+			x86_mov_membase_reg(inst, $1, disp, $2, 4);
+			disp += 4;
+		}
+		if($3 > disp)
+		{
+			x86_mov_membase_reg(inst, $1, disp, $2, 2);
+		}
+	}
+/* Odd size <= 32: as above, but the final 1-byte store requires $2 to
+   live in a byte-addressable register. */
+	[reg, +reg("eax"|"ecx"|"edx"|"ebx"), imm, scratch("?"),
+		if("$3 <= 32 && ($3 % 2) != 0"), space("32 + $3 * 4")] -> {
+		int disp;
+		x86_mov_reg_reg(inst, $4, $2, 4);
+		x86_shift_reg_imm(inst, X86_SHL, $2, 8);
+		x86_alu_reg_reg(inst, X86_OR, $2, $4);
+		x86_mov_reg_reg(inst, $4, $2, 4);
+		x86_shift_reg_imm(inst, X86_SHL, $2, 16);
+		x86_alu_reg_reg(inst, X86_OR, $2, $4);
+		disp = 0;
+		while($3 >= (disp + 4))
+		{
+			x86_mov_membase_reg(inst, $1, disp, $2, 4);
+			disp += 4;
+		}
+		if($3 >= (disp + 2))
+		{
+			x86_mov_membase_reg(inst, $1, disp, $2, 2);
+			disp += 2;
+		}
+		if($3 > disp)
+		{
+			x86_mov_membase_reg(inst, $1, disp, $2, 1);
+		}
+	}
+/* General case: push args right-to-left, call jit_memset(dest, value, size),
+   then pop the three arguments.  The call may destroy EAX/ECX/EDX/EBX,
+   hence the clobber list. */
+	[reg, reg, reg, clobber("eax", "ecx", "edx", "ebx")] -> {
+		x86_push_reg(inst, $3);
+		x86_push_reg(inst, $2);
+		x86_push_reg(inst, $1);
+		x86_call_code(inst, jit_memset);
+		x86_alu_reg_imm(inst, X86_ADD, X86_ESP, 3 * sizeof(void *));
+	}
/*
* Allocate memory from the stack.