1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*/
#include "basic_asm.h"
/*
* Careful this will 'clobber' vsx (by design), VSX are always
* volatile though so unlike vmx this isn't so much of an issue
* Still should avoid calling from C
*/
/*
 * load_vsx(const char *buf)  -- r3 = buf
 *
 * Loads twelve consecutive 16-byte quantities from the buffer at r3
 * into VSX registers vs20..vs31 (12 * 16 = 192 bytes total).
 * Clobbers r5, which is used as a running byte offset into the buffer.
 *
 * NOTE(review): lxvd2x loads two doublewords; on little-endian the
 * in-register doubleword order differs from a plain byte copy --
 * presumably fine here because store_vsx below uses the matching
 * stxvd2x, making the round trip symmetric. Confirm if the raw
 * register image is ever compared against memory directly.
 */
FUNC_START(load_vsx)
	/* r5 = offset of the next 16-byte element, starting at 0 */
	li	r5,0
	lxvd2x	vs20,r5,r3
	addi	r5,r5,16
	lxvd2x	vs21,r5,r3
	addi	r5,r5,16
	lxvd2x	vs22,r5,r3
	addi	r5,r5,16
	lxvd2x	vs23,r5,r3
	addi	r5,r5,16
	lxvd2x	vs24,r5,r3
	addi	r5,r5,16
	lxvd2x	vs25,r5,r3
	addi	r5,r5,16
	lxvd2x	vs26,r5,r3
	addi	r5,r5,16
	lxvd2x	vs27,r5,r3
	addi	r5,r5,16
	lxvd2x	vs28,r5,r3
	addi	r5,r5,16
	lxvd2x	vs29,r5,r3
	addi	r5,r5,16
	lxvd2x	vs30,r5,r3
	addi	r5,r5,16
	lxvd2x	vs31,r5,r3
	blr
FUNC_END(load_vsx)
/*
 * store_vsx(char *buf)  -- r3 = buf
 *
 * Stores VSX registers vs20..vs31 as twelve consecutive 16-byte
 * quantities into the buffer at r3 (12 * 16 = 192 bytes total).
 * Exact mirror of load_vsx above, so a load_vsx/store_vsx round trip
 * reproduces the original buffer contents regardless of endianness.
 * Clobbers r5, which is used as a running byte offset into the buffer.
 */
FUNC_START(store_vsx)
	/* r5 = offset of the next 16-byte element, starting at 0 */
	li	r5,0
	stxvd2x	vs20,r5,r3
	addi	r5,r5,16
	stxvd2x	vs21,r5,r3
	addi	r5,r5,16
	stxvd2x	vs22,r5,r3
	addi	r5,r5,16
	stxvd2x	vs23,r5,r3
	addi	r5,r5,16
	stxvd2x	vs24,r5,r3
	addi	r5,r5,16
	stxvd2x	vs25,r5,r3
	addi	r5,r5,16
	stxvd2x	vs26,r5,r3
	addi	r5,r5,16
	stxvd2x	vs27,r5,r3
	addi	r5,r5,16
	stxvd2x	vs28,r5,r3
	addi	r5,r5,16
	stxvd2x	vs29,r5,r3
	addi	r5,r5,16
	stxvd2x	vs30,r5,r3
	addi	r5,r5,16
	stxvd2x	vs31,r5,r3
	blr
FUNC_END(store_vsx)
|