# CS_ARCH_ARM, CS_MODE_THUMB+CS_MODE_MCLASS, None
0xb6,0xff,0x40,0x24 = vrintn.f16 q1, q0
0xba,0xff,0x48,0x04 = vrintn.f32 q0, q4
0xb6,0xff,0x42,0x05 = vrinta.f16 q0, q1
0xba,0xff,0x46,0x25 = vrinta.f32 q1, q3
0xb6,0xff,0xca,0x06 = vrintm.f16 q0, q5
0xba,0xff,0xc8,0x06 = vrintm.f32 q0, q4
0xb6,0xff,0xc0,0x27 = vrintp.f16 q1, q0
0xba,0xff,0xc2,0x07 = vrintp.f32 q0, q1
0xb6,0xff,0xc4,0x24 = vrintx.f16 q1, q2
0xba,0xff,0xc2,0x24 = vrintx.f32 q1, q1
0xb6,0xff,0xcc,0x25 = vrintz.f16 q1, q6
0xba,0xff,0xc0,0x25 = vrintz.f32 q1, q0
0xb6,0xee,0x60,0x0a = vrintr.f32 s0, s1
0xb6,0xee,0x41,0x0b = vrintr.f64 d0, d1
0x12,0xff,0x56,0x4d = vmul.f16 q2, q1, q3
0x00,0xff,0x5a,0x0d = vmul.f32 q0, q0, q5
0x24,0xfc,0x42,0x68 = vcmla.f16 q3, q2, q1, #0
0xa0,0xfc,0x4a,0x08 = vcmla.f16 q0, q0, q5, #0x5a
0x2e,0xfd,0x44,0x68 = vcmla.f16 q3, q7, q2, #0xb4
0xae,0xfd,0x4c,0x48 = vcmla.f16 q2, q7, q6, #0x10e
0x3c,0xfc,0x4c,0x48 = vcmla.f32 q2, q6, q6, #0
0xb2,0xfc,0x46,0xe8 = vcmla.f32 q7, q1, q3, #0x5a
0x3a,0xfd,0x46,0x88 = vcmla.f32 q4, q5, q3, #0xb4
0xb4,0xfd,0x4e,0x68 = vcmla.f32 q3, q2, q7, #0x10e
0x14,0xef,0x56,0x0c = vfma.f16 q0, q2, q3
0x06,0xef,0x5e,0x0c = vfma.f32 q0, q3, q7
0x34,0xef,0x5a,0x0c = vfms.f16 q0, q2, q5
0x22,0xef,0x54,0x2c = vfms.f32 q1, q1, q2
0x10,0xef,0x4a,0x0d = vadd.f16 q0, q0, q5
0x06,0xef,0x40,0x2d = vadd.f32 q1, q3, q0
0x02,0xef,0x44,0x0d = vadd.f32 q0, q1, q2
0x82,0xfc,0x4e,0x48 = vcadd.f16 q2, q1, q7, #0x5a
0x8a,0xfd,0x4e,0x48 = vcadd.f16 q2, q5, q7, #0x10e
0x98,0xfc,0x4e,0x08 = vcadd.f32 q0, q4, q7, #0x5a
0x94,0xfd,0x46,0x48 = vcadd.f32 q2, q2, q3, #0x10e
0x30,0xff,0x4c,0x0d = vabd.f16 q0, q0, q6
0x22,0xff,0x48,0x0d = vabd.f32 q0, q1, q4
0xbf,0xef,0x5e,0x2c = vcvt.f16.s16 q1, q7, #1
0xb0,0xef,0x5e,0x2c = vcvt.f16.s16 q1, q7, #0x10
0xb5,0xef,0x5e,0x2c = vcvt.f16.s16 q1, q7, #0xb
0xbd,0xef,0x52,0x2d = vcvt.s16.f16 q1, q1, #3
0xb6,0xff,0x52,0x4c = vcvt.f16.u16 q2, q1, #0xa
0xbd,0xff,0x50,0x0d = vcvt.u16.f16 q0, q0, #3
0xbf,0xef,0x5e,0x2e = vcvt.f32.s32 q1, q7, #1
0xa0,0xef,0x5e,0x2e = vcvt.f32.s32 q1, q7, #0x20
0xba,0xef,0x5e,0x2e = vcvt.f32.s32 q1, q7, #6
0xab,0xef,0x50,0x2f = vcvt.s32.f32 q1, q0, #0x15
0xbc,0xff,0x58,0x2e = vcvt.f32.u32 q1, q4, #4
0xb8,0xff,0x5a,0x2f = vcvt.u32.f32 q1, q5, #8
0xb7,0xff,0x42,0x06 = vcvt.f16.s16 q0, q1
0xb7,0xff,0xc8,0x06 = vcvt.f16.u16 q0, q4
0xb7,0xff,0x40,0x07 = vcvt.s16.f16 q0, q0
0xb7,0xff,0xc0,0x07 = vcvt.u16.f16 q0, q0
0xbb,0xff,0x40,0x06 = vcvt.f32.s32 q0, q0
0xbb,0xff,0xc0,0x06 = vcvt.f32.u32 q0, q0
0xbb,0xff,0x40,0x07 = vcvt.s32.f32 q0, q0
0xbb,0xff,0xc4,0x07 = vcvt.u32.f32 q0, q2
0xb7,0xff,0x4e,0x00 = vcvta.s16.f16 q0, q7
0xbc,0xfe,0xe1,0x1a = vcvta.s32.f32 s2, s3
0xb7,0xff,0x4e,0x00 = vcvta.s16.f16 q0, q7
0xbb,0xff,0xcc,0xe1 = vcvtn.u32.f32 q7, q6
0xbb,0xff,0x4e,0x02 = vcvtp.s32.f32 q0, q7
0xbb,0xff,0xc8,0x23 = vcvtm.u32.f32 q1, q4
0xb5,0xff,0xce,0x07 = vneg.f16 q0, q7
0xb9,0xff,0xc4,0x07 = vneg.f32 q0, q2
0xb5,0xff,0x44,0x07 = vabs.f16 q0, q2
0xb9,0xff,0x40,0x07 = vabs.f32 q0, q0
0x3f,0xfe,0x83,0x2e = vmaxnma.f16 q1, q1
0x3f,0xee,0x8d,0x4e = vmaxnma.f32 q2, q6
0x3f,0xfe,0x85,0x1e = vminnma.f16 q0, q2
0x3f,0xee,0x83,0x1e = vminnma.f32 q0, q1
0x08,0xbf = it eq
0x30,0xee,0x20,0x0a = vaddeq.f32 s0, s0, s1
0x71,0xfe,0x4d,0x0f = vpst
0x12,0xef,0x44,0x0d = vaddt.f16 q0, q1, q2
0x71,0xfe,0x4d,0x8f = vpste
0xbb,0xff,0xc2,0x03 = vcvtmt.u32.f32 q0, q1
0xbb,0xff,0x42,0x01 = vcvtne.s32.f32 q0, q1
0x18,0xbf = it ne
0xbd,0xee,0xe0,0x0a = vcvtne.s32.f32 s0, s1
0xa8,0xbf = it ge
0xb2,0xee,0xe0,0x3b = vcvttge.f64.f16 d3, s1
0x77,0xee,0xc1,0x9f = vpte.f32 lt, q3, r1
0xbb,0xff,0xc0,0x47 = vcvtt.u32.f32 q2, q0
0xbb,0xff,0xc0,0x27 = vcvte.u32.f32 q1, q0
0x0c,0xbf = ite eq
0xbc,0xee,0xe0,0x0a = vcvteq.u32.f32 s0, s1
0xb8,0xee,0x60,0x0a = vcvtne.f32.u32 s0, s1
0x71,0xfe,0x4d,0x8f = vpste
0x12,0xff,0x54,0x0d = vmult.f16 q0, q1, q2
0x12,0xff,0x54,0x0d = vmule.f16 q0, q1, q2
0x0c,0xbf = ite eq
0x20,0xee,0x01,0x0b = vmuleq.f64 d0, d0, d1
0x20,0xee,0x02,0x1b = vmulne.f64 d1, d0, d2
0x08,0xbf = it eq
0xb1,0xee,0x60,0x0a = vnegeq.f32 s0, s1
0x04,0xbf = itt eq
0x20,0xee,0xc1,0x0a = vnmuleq.f32 s0, s1, s2
0x20,0xee,0x81,0x0a = vmuleq.f32 s0, s1, s2
0x71,0xfe,0x4d,0x8f = vpste
0xb6,0xff,0x42,0x04 = vrintnt.f16 q0, q1
0xba,0xff,0x42,0x04 = vrintne.f32 q0, q1