| author    | Andy Wingo <wingo@pobox.com>             | 2015-11-20 14:52:35 +0100 |
|-----------|------------------------------------------|---------------------------|
| committer | Andy Wingo <wingo@pobox.com>             | 2015-12-01 11:30:55 +0100 |
| commit    | bdfa1c1b424fc6d408c55e7db17cb3ed7117606a |                           |
| tree      | c2c128b948d8de50b7fc2bc67af5a1e662a57a52 /libguile/vm-engine.c |      |
| parent    | 8f18b71b7afcd475553f760f83af7d79fc34cf01 |                           |
Add tagged and untagged arithmetic ops with immediate operands
* libguile/vm-engine.c (add/immediate, sub/immediate)
(uadd/immediate, usub/immediate, umul/immediate): New instructions.
* module/language/cps/compile-bytecode.scm (compile-function):
* module/language/cps/slot-allocation.scm (compute-needs-slot):
* module/language/cps/types.scm:
* module/system/vm/assembler.scm (system):
* module/language/cps/effects-analysis.scm: Add support for the new
instructions.
* module/language/cps/optimize.scm (optimize-first-order-cps): Move
primcall specialization to the last step -- the only benefit of doing
it earlier was easier reasoning about side effects, and we're already
doing that in a more general way with (language cps types).
* module/language/cps/specialize-primcalls.scm (specialize-primcalls):
Specialize add and sub to add/immediate and sub/immediate, and
specialize u64 addition as well. U64 specialization doesn't work now
though because computing constant values doesn't work for U64s; oh
well.
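
The specialize-primcalls change amounts to a range check on the constant
operand: a constant can only be folded into one of the new /immediate opcodes
when it fits the unsigned 8-bit C8 field of the X8_S8_S8_C8 encoding used
below. The following C sketch only models that decision; it is not the actual
Scheme pass in module/language/cps/specialize-primcalls.scm, and
choose_add_opcode and the TOY_OP_* names are made up for illustration.

```c
#include <stdint.h>

/* Hypothetical opcode tags, for illustration only. */
enum toy_op { TOY_OP_ADD, TOY_OP_ADD_IMMEDIATE };

/* Decide how to compile (add x constant): the constant can be folded
   into add/immediate only when it fits the unsigned 8-bit immediate
   field of the X8_S8_S8_C8 encoding. */
enum toy_op
choose_add_opcode (int64_t constant_operand)
{
  if (constant_operand >= 0 && constant_operand <= 0xff)
    return TOY_OP_ADD_IMMEDIATE;  /* e.g. x + 3 becomes add/immediate x, 3 */
  return TOY_OP_ADD;              /* too large for C8: keep the generic add */
}
```

The same bound applies to sub/immediate and to the untagged u*/immediate
variants, consistent with the "unsigned 8-bit value IMM" wording in the opcode
comments in the diff below.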
Diffstat (limited to 'libguile/vm-engine.c')
-rw-r--r--  libguile/vm-engine.c  102
1 file changed, 97 insertions(+), 5 deletions(-)
diff --git a/libguile/vm-engine.c b/libguile/vm-engine.c
index 80ab3afd8..2f3b3fd85 100644
--- a/libguile/vm-engine.c
+++ b/libguile/vm-engine.c
@@ -2382,7 +2382,29 @@ VM_NAME (scm_i_thread *thread, struct scm_vm *vp,
       BINARY_INTEGER_OP (+, scm_sum);
     }
 
-  VM_DEFINE_OP (87, unused_87, NULL, NOP)
+  /* add/immediate dst:8 src:8 imm:8
+   *
+   * Add the unsigned 8-bit value IMM to the value from SRC, and place
+   * the result in DST.
+   */
+  VM_DEFINE_OP (87, add_immediate, "add/immediate", OP1 (X8_S8_S8_C8) | OP_DST)
+    {
+      scm_t_uint8 dst, src, imm;
+      SCM x;
+
+      UNPACK_8_8_8 (op, dst, src, imm);
+      x = SP_REF (src);
+
+      if (SCM_LIKELY (SCM_I_INUMP (x)))
+        {
+          scm_t_signed_bits sum = SCM_I_INUM (x) + (scm_t_signed_bits) imm;
+
+          if (SCM_LIKELY (SCM_POSFIXABLE (sum)))
+            RETURN (SCM_I_MAKINUM (sum));
+        }
+
+      RETURN_EXP (scm_sum (x, SCM_I_MAKINUM (imm)));
+    }
 
   /* sub dst:8 a:8 b:8
    *
@@ -2393,7 +2415,29 @@ VM_NAME (scm_i_thread *thread, struct scm_vm *vp,
       BINARY_INTEGER_OP (-, scm_difference);
     }
 
-  VM_DEFINE_OP (89, unused_89, NULL, NOP)
+  /* sub/immediate dst:8 src:8 imm:8
+   *
+   * Subtract the unsigned 8-bit value IMM from the value in SRC, and
+   * place the result in DST.
+   */
+  VM_DEFINE_OP (89, sub_immediate, "sub/immediate", OP1 (X8_S8_S8_C8) | OP_DST)
+    {
+      scm_t_uint8 dst, src, imm;
+      SCM x;
+
+      UNPACK_8_8_8 (op, dst, src, imm);
+      x = SP_REF (src);
+
+      if (SCM_LIKELY (SCM_I_INUMP (x)))
+        {
+          scm_t_signed_bits diff = SCM_I_INUM (x) - (scm_t_signed_bits) imm;
+
+          if (SCM_LIKELY (SCM_NEGFIXABLE (diff)))
+            RETURN (SCM_I_MAKINUM (diff));
+        }
+
+      RETURN_EXP (scm_difference (x, SCM_I_MAKINUM (imm)));
+    }
 
   /* mul dst:8 a:8 b:8
    *
@@ -3400,9 +3444,57 @@ VM_NAME (scm_i_thread *thread, struct scm_vm *vp,
       NEXT (1);
     }
 
-  VM_DEFINE_OP (152, unused_152, NULL, NOP)
-  VM_DEFINE_OP (153, unused_153, NULL, NOP)
-  VM_DEFINE_OP (154, unused_154, NULL, NOP)
+  /* uadd/immediate dst:8 src:8 imm:8
+   *
+   * Add the unsigned 64-bit value from SRC with the unsigned 8-bit
+   * value IMM and place the raw unsigned 64-bit result in DST.
+   * Overflow will wrap around.
+   */
+  VM_DEFINE_OP (152, uadd_immediate, "uadd/immediate", OP1 (X8_S8_S8_C8) | OP_DST)
+    {
+      scm_t_uint8 dst, src, imm;
+      scm_t_uint64 x;
+
+      UNPACK_8_8_8 (op, dst, src, imm);
+      x = SP_REF_U64 (src);
+      SP_SET_U64 (dst, x + (scm_t_uint64) imm);
+      NEXT (1);
+    }
+
+  /* usub/immediate dst:8 src:8 imm:8
+   *
+   * Subtract the unsigned 8-bit value IMM from the unsigned 64-bit
+   * value in SRC and place the raw unsigned 64-bit result in DST.
+   * Overflow will wrap around.
+   */
+  VM_DEFINE_OP (153, usub_immediate, "usub/immediate", OP1 (X8_S8_S8_C8) | OP_DST)
+    {
+      scm_t_uint8 dst, src, imm;
+      scm_t_uint64 x;
+
+      UNPACK_8_8_8 (op, dst, src, imm);
+      x = SP_REF_U64 (src);
+      SP_SET_U64 (dst, x - (scm_t_uint64) imm);
+      NEXT (1);
+    }
+
+  /* umul/immediate dst:8 src:8 imm:8
+   *
+   * Multiply the unsigned 64-bit value from SRC by the unsigned 8-bit
+   * value IMM and place the raw unsigned 64-bit result in DST.
+   * Overflow will wrap around.
+   */
+  VM_DEFINE_OP (154, umul_immediate, "umul/immediate", OP1 (X8_S8_S8_C8) | OP_DST)
+    {
+      scm_t_uint8 dst, src, imm;
+      scm_t_uint64 x;
+
+      UNPACK_8_8_8 (op, dst, src, imm);
+      x = SP_REF_U64 (src);
+      SP_SET_U64 (dst, x * (scm_t_uint64) imm);
+      NEXT (1);
+    }
+
   VM_DEFINE_OP (155, unused_155, NULL, NOP)
   VM_DEFINE_OP (156, unused_156, NULL, NOP)
   VM_DEFINE_OP (157, unused_157, NULL, NOP)
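
A note on the overflow checks: because IMM is unsigned, add/immediate can only
push a fixnum toward the positive bound and sub/immediate only toward the
negative one, which is why the fast paths test SCM_POSFIXABLE and
SCM_NEGFIXABLE respectively rather than doing a full range check. The
standalone sketch below replays that reasoning with a made-up fixnum bound
(TOY_FIXNUM_MAX and the toy_* names are assumptions for illustration, not
Guile's actual SCM_MOST_POSITIVE_FIXNUM); the untagged u*/immediate variants
skip the check entirely and simply wrap modulo 2^64.

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for SCM_MOST_POSITIVE_FIXNUM; an assumption for this sketch,
   not Guile's real bound. */
#define TOY_FIXNUM_MAX (((int64_t) 1 << 61) - 1)

/* Mirror of the add/immediate fast path: imm is 0..255, so the sum can
   only exceed the positive fixnum bound, never fall below the negative
   one. */
static int
toy_add_immediate_fits (int64_t x, uint8_t imm, int64_t *sum)
{
  *sum = x + (int64_t) imm;      /* x is fixnum-ranged, so no int64 wrap */
  return *sum <= TOY_FIXNUM_MAX; /* the one check the fast path needs */
}

int
main (void)
{
  int64_t sum;

  if (toy_add_immediate_fits (42, 200, &sum))
    printf ("fast path: %lld\n", (long long) sum);  /* prints 242 */

  if (!toy_add_immediate_fits (TOY_FIXNUM_MAX, 1, &sum))
    printf ("slow path: fall back to the generic sum\n");  /* overflow */

  /* The untagged uadd/immediate has no such check: the result is the
     raw 64-bit sum, wrapping around on overflow. */
  printf ("u64 wrap: %llu\n",
          (unsigned long long) (UINT64_MAX - 1 + (uint64_t) 3)); /* prints 1 */
  return 0;
}
```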