path: root/src/video_core/engines/kepler_compute.cpp

// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <bitset>
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_base.h"
#include "video_core/textures/decoders.h"

namespace Tegra::Engines {

KeplerCompute::KeplerCompute(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
                             MemoryManager& memory_manager)
    : system{system}, rasterizer{rasterizer}, memory_manager{memory_manager},
      upload_state{memory_manager, regs.upload} {}

KeplerCompute::~KeplerCompute() = default;

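// Writes the call's argument into the addressed register, then handles the side effects of
// registers with special behavior: inline memory uploads and compute launches.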
void KeplerCompute::CallMethod(const GPU::MethodCall& method_call) {
    ASSERT_MSG(method_call.method < Regs::NUM_REGS,
               "Invalid KeplerCompute register, increase the size of the Regs structure");

    regs.reg_array[method_call.method] = method_call.argument;

    switch (method_call.method) {
    case KEPLER_COMPUTE_REG_INDEX(exec_upload): {
        upload_state.ProcessExec(regs.exec_upload.linear != 0);
        break;
    }
    case KEPLER_COMPUTE_REG_INDEX(data_upload): {
        const bool is_last_call = method_call.IsLastCall();
        upload_state.ProcessData(method_call.argument, is_last_call);
        if (is_last_call) {
            system.GPU().Maxwell3D().dirty.OnMemoryWrite();
        }
        break;
    }
    case KEPLER_COMPUTE_REG_INDEX(launch):
        ProcessLaunch();
        break;
    default:
        break;
    }
}

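// Reads the texture handle stored at the given offset of the texture const buffer
// (regs.tex_cb_index) and resolves it into its TIC/TSC descriptors.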
Texture::FullTextureInfo KeplerCompute::GetTexture(std::size_t offset) const {
    const std::bitset<8> cbuf_mask = launch_description.const_buffer_enable_mask.Value();
    ASSERT(cbuf_mask[regs.tex_cb_index]);

    const auto& texinfo = launch_description.const_buffer_config[regs.tex_cb_index];
    ASSERT(texinfo.Address() != 0);

    const GPUVAddr address = texinfo.Address() + offset * sizeof(Texture::TextureHandle);
    ASSERT(address < texinfo.Address() + texinfo.size);

    const Texture::TextureHandle tex_handle{memory_manager.Read<u32>(address)};
    return GetTextureInfo(tex_handle);
}

Texture::FullTextureInfo KeplerCompute::GetTextureInfo(Texture::TextureHandle tex_handle) const {
    return Texture::FullTextureInfo{GetTICEntry(tex_handle.tic_id), GetTSCEntry(tex_handle.tsc_id)};
}

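// Reads a 32-bit value from one of the launch descriptor's const buffers.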
u32 KeplerCompute::AccessConstBuffer32(ShaderType stage, u64 const_buffer, u64 offset) const {
    ASSERT(stage == ShaderType::Compute);
    const auto& buffer = launch_description.const_buffer_config[const_buffer];
    u32 result;
    std::memcpy(&result, memory_manager.GetPointer(buffer.Address() + offset), sizeof(u32));
    return result;
}

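// Bound samplers live in the texture const buffer, so they are resolved through the bindless
// path using regs.tex_cb_index as the const buffer slot.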
SamplerDescriptor KeplerCompute::AccessBoundSampler(ShaderType stage, u64 offset) const {
    return AccessBindlessSampler(stage, regs.tex_cb_index, offset * sizeof(Texture::TextureHandle));
}

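// Reads a texture handle from the given const buffer offset and builds a sampler descriptor
// from the referenced TIC/TSC entries.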
SamplerDescriptor KeplerCompute::AccessBindlessSampler(ShaderType stage, u64 const_buffer,
                                                       u64 offset) const {
    ASSERT(stage == ShaderType::Compute);
    const auto& tex_info_buffer = launch_description.const_buffer_config[const_buffer];
    const GPUVAddr tex_info_address = tex_info_buffer.Address() + offset;

    const Texture::TextureHandle tex_handle{memory_manager.Read<u32>(tex_info_address)};
    const Texture::FullTextureInfo tex_info = GetTextureInfo(tex_handle);
    SamplerDescriptor result = SamplerDescriptor::FromTicTexture(tex_info.tic.texture_type.Value());
    result.is_shadow.Assign(tex_info.tsc.depth_compare_enabled.Value());
    return result;
}

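// Reads the launch descriptor from GPU memory and hands the compute dispatch to the rasterizer.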
void KeplerCompute::ProcessLaunch() {
    const GPUVAddr launch_desc_loc = regs.launch_desc_loc.Address();
    memory_manager.ReadBlockUnsafe(launch_desc_loc, &launch_description,
                                   LaunchParams::NUM_LAUNCH_PARAMETERS * sizeof(u32));

    const GPUVAddr code_addr = regs.code_loc.Address() + launch_description.program_start;
    LOG_TRACE(HW_GPU, "Compute invocation launched at address 0x{:016x}", code_addr);

    rasterizer.DispatchCompute(code_addr);
}

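// Fetches a texture image control (TIC) entry from the TIC pool pointed to by regs.tic.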
Texture::TICEntry KeplerCompute::GetTICEntry(u32 tic_index) const {
    const GPUVAddr tic_address_gpu{regs.tic.Address() + tic_index * sizeof(Texture::TICEntry)};

    Texture::TICEntry tic_entry;
    memory_manager.ReadBlockUnsafe(tic_address_gpu, &tic_entry, sizeof(Texture::TICEntry));

    const auto r_type{tic_entry.r_type.Value()};
    const auto g_type{tic_entry.g_type.Value()};
    const auto b_type{tic_entry.b_type.Value()};
    const auto a_type{tic_entry.a_type.Value()};

    // TODO(Subv): Different data types for separate components are not supported
    DEBUG_ASSERT(r_type == g_type && r_type == b_type && r_type == a_type);

    return tic_entry;
}

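// Fetches a texture sampler control (TSC) entry from the TSC pool pointed to by regs.tsc.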
Texture::TSCEntry KeplerCompute::GetTSCEntry(u32 tsc_index) const {
    const GPUVAddr tsc_address_gpu{regs.tsc.Address() + tsc_index * sizeof(Texture::TSCEntry)};

    Texture::TSCEntry tsc_entry;
    memory_manager.ReadBlockUnsafe(tsc_address_gpu, &tsc_entry, sizeof(Texture::TSCEntry));
    return tsc_entry;
}

} // namespace Tegra::Engines