path: root/src/shader_recompiler/frontend/maxwell/translate/impl/atomic_operations_global_memory.cpp
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/bit_field.h"
#include "common/common_types.h"
#include "shader_recompiler/frontend/maxwell/translate/impl/impl.h"

namespace Shader::Maxwell {
namespace {
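// Atomic operation encoded in the instruction's op field.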
enum class AtomOp : u64 {
    ADD,
    MIN,
    MAX,
    INC,
    DEC,
    AND,
    OR,
    XOR,
    EXCH,
    SAFEADD,
};

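// Operand size and type encoded in the instruction's size field.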
enum class AtomSize : u64 {
    U32,
    S32,
    U64,
    F32,
    F16x2,
    S64,
};

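// Emits the integer global atomic IR instruction corresponding to the requested operation.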
IR::U32U64 ApplyIntegerAtomOp(IR::IREmitter& ir, const IR::U32U64& offset, const IR::U32U64& op_b,
                              AtomOp op, bool is_signed) {
    switch (op) {
    case AtomOp::ADD:
        return ir.GlobalAtomicIAdd(offset, op_b);
    case AtomOp::MIN:
        return ir.GlobalAtomicIMin(offset, op_b, is_signed);
    case AtomOp::MAX:
        return ir.GlobalAtomicIMax(offset, op_b, is_signed);
    case AtomOp::INC:
        return ir.GlobalAtomicInc(offset, op_b);
    case AtomOp::DEC:
        return ir.GlobalAtomicDec(offset, op_b);
    case AtomOp::AND:
        return ir.GlobalAtomicAnd(offset, op_b);
    case AtomOp::OR:
        return ir.GlobalAtomicOr(offset, op_b);
    case AtomOp::XOR:
        return ir.GlobalAtomicXor(offset, op_b);
    case AtomOp::EXCH:
        return ir.GlobalAtomicExchange(offset, op_b);
    default:
        throw NotImplementedException("Integer Atom Operation {}", op);
    }
}

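// Emits the floating-point global atomic IR instruction. Only F32 ADD and F16x2 ADD/MIN/MAX are
// supported; any other combination throws.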
IR::Value ApplyFpAtomOp(IR::IREmitter& ir, const IR::U64& offset, const IR::Value& op_b, AtomOp op,
                        AtomSize size) {
    static constexpr IR::FpControl f16_control{
        .no_contraction{false},
        .rounding{IR::FpRounding::RN},
        .fmz_mode{IR::FmzMode::DontCare},
    };
    static constexpr IR::FpControl f32_control{
        .no_contraction{false},
        .rounding{IR::FpRounding::RN},
        .fmz_mode{IR::FmzMode::FTZ},
    };
    switch (op) {
    case AtomOp::ADD:
        return size == AtomSize::F32 ? ir.GlobalAtomicF32Add(offset, op_b, f32_control)
                                     : ir.GlobalAtomicF16x2Add(offset, op_b, f16_control);
    case AtomOp::MIN:
        return ir.GlobalAtomicF16x2Min(offset, op_b, f16_control);
    case AtomOp::MAX:
        return ir.GlobalAtomicF16x2Max(offset, op_b, f16_control);
    default:
        throw NotImplementedException("FP Atom Operation {}", op);
    }
}

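// Decodes the effective global memory address: a 32-bit register zero-extended to 64 bits, or a
// 64-bit register pair when the E bit is set, plus a 20-bit immediate offset.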
IR::U64 AtomOffset(TranslatorVisitor& v, u64 insn) {
    union {
        u64 raw;
        BitField<8, 8, IR::Reg> addr_reg;
        BitField<28, 20, s64> addr_offset;
        BitField<28, 20, u64> rz_addr_offset;
        BitField<48, 1, u64> e;
    } const mem{insn};

    const IR::U64 address{[&]() -> IR::U64 {
        if (mem.e == 0) {
            return v.ir.UConvert(64, v.X(mem.addr_reg));
        }
        return v.L(mem.addr_reg);
    }()};
    const u64 addr_offset{[&]() -> u64 {
        if (mem.addr_reg == IR::Reg::RZ) {
            // When RZ is used, the 20-bit offset is treated as an absolute address
            return static_cast<u64>(mem.rz_addr_offset.Value());
        } else {
            return static_cast<u64>(mem.addr_offset.Value());
        }
    }()};
    return v.ir.IAdd(address, v.ir.Imm64(addr_offset));
}

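// Returns true when the operation is not defined for the given size; such encodings are
// translated as a plain global load instead.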
bool AtomOpNotApplicable(AtomSize size, AtomOp op) {
    // TODO: SAFEADD
    switch (size) {
    case AtomSize::S32:
    case AtomSize::U64:
        return (op == AtomOp::INC || op == AtomOp::DEC);
    case AtomSize::S64:
        return !(op == AtomOp::MIN || op == AtomOp::MAX);
    case AtomSize::F32:
        return op != AtomOp::ADD;
    case AtomSize::F16x2:
        return !(op == AtomOp::ADD || op == AtomOp::MIN || op == AtomOp::MAX);
    default:
        return false;
    }
}

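// Loads the value at the target address with the width implied by the atomic size.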
IR::U32U64 LoadGlobal(IR::IREmitter& ir, const IR::U64& offset, AtomSize size) {
    switch (size) {
    case AtomSize::U32:
    case AtomSize::S32:
    case AtomSize::F32:
    case AtomSize::F16x2:
        return ir.LoadGlobal32(offset);
    case AtomSize::U64:
    case AtomSize::S64:
        return ir.PackUint2x32(ir.LoadGlobal64(offset));
    default:
        throw NotImplementedException("Atom Size {}", size);
    }
}

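// Writes the atomic result to the destination register(s) according to the operation size.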
void StoreResult(TranslatorVisitor& v, IR::Reg dest_reg, const IR::Value& result, AtomSize size) {
    switch (size) {
    case AtomSize::U32:
    case AtomSize::S32:
    case AtomSize::F16x2:
        return v.X(dest_reg, IR::U32{result});
    case AtomSize::U64:
    case AtomSize::S64:
        return v.L(dest_reg, IR::U64{result});
    case AtomSize::F32:
        return v.F(dest_reg, IR::F32{result});
    default:
        break;
    }
}

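// Reads the source operand with the appropriate width and dispatches to the integer or
// floating-point atomic helper.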
IR::Value ApplyAtomOp(TranslatorVisitor& v, IR::Reg operand_reg, const IR::U64& offset,
                      AtomSize size, AtomOp op) {
    switch (size) {
    case AtomSize::U32:
    case AtomSize::S32:
        return ApplyIntegerAtomOp(v.ir, offset, v.X(operand_reg), op, size == AtomSize::S32);
    case AtomSize::U64:
    case AtomSize::S64:
        return ApplyIntegerAtomOp(v.ir, offset, v.L(operand_reg), op, size == AtomSize::S64);
    case AtomSize::F32:
        return ApplyFpAtomOp(v.ir, offset, v.F(operand_reg), op, size);
    case AtomSize::F16x2: {
        return ApplyFpAtomOp(v.ir, offset, v.ir.UnpackFloat2x16(v.X(operand_reg)), op, size);
    }
    default:
        throw NotImplementedException("Atom Size {}", size);
    }
}

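// Performs the global atomic operation (or a plain load when the op/size pair is not applicable)
// and optionally writes the result to the destination register.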
void GlobalAtomic(TranslatorVisitor& v, IR::Reg dest_reg, IR::Reg operand_reg,
                  const IR::U64& offset, AtomSize size, AtomOp op, bool write_dest) {
    IR::Value result;
    if (AtomOpNotApplicable(size, op)) {
        result = LoadGlobal(v.ir, offset, size);
    } else {
        result = ApplyAtomOp(v, operand_reg, offset, size, op);
    }
    if (write_dest) {
        StoreResult(v, dest_reg, result, size);
    }
}
} // Anonymous namespace

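// ATOM: Atomic operation on global memory that returns the previous value in the destination
// register.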
void TranslatorVisitor::ATOM(u64 insn) {
    union {
        u64 raw;
        BitField<0, 8, IR::Reg> dest_reg;
        BitField<20, 8, IR::Reg> operand_reg;
        BitField<49, 3, AtomSize> size;
        BitField<52, 4, AtomOp> op;
    } const atom{insn};
    const IR::U64 offset{AtomOffset(*this, insn)};
    GlobalAtomic(*this, atom.dest_reg, atom.operand_reg, offset, atom.size, atom.op, true);
}

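// RED: Reduction operation on global memory; the result is discarded (destination is RZ).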
void TranslatorVisitor::RED(u64 insn) {
    union {
        u64 raw;
        BitField<0, 8, IR::Reg> operand_reg;
        BitField<20, 3, AtomSize> size;
        BitField<23, 3, AtomOp> op;
    } const red{insn};
    const IR::U64 offset{AtomOffset(*this, insn)};
    GlobalAtomic(*this, IR::Reg::RZ, red.operand_reg, offset, red.size, red.op, true);
}

} // namespace Shader::Maxwell