Optimizations (#1680)

* Optimizations

1) Simplified some headers to improve compilation time
2) Simplified some templates to reduce executable size
3) Eliminated std::future to fix compilation on mingw64
4) PKG installation can now be cancelled
5) cellGame fixes
6) XAudio2 fix for mingw64
7) Fixed a PPUInterpreter bug (Clang)

* any_pod<> implemented

Aliases: any16, any32, any64
rsx::make_command fixed
Ivan 2016-04-25 13:49:12 +03:00
parent 75fe95eeb1
commit da7472fe81
96 changed files with 2086 additions and 1772 deletions

.gitmodules

@ -22,6 +22,7 @@
[submodule "libpng"]
path = 3rdparty/libpng
url = https://github.com/RPCS3/libpng
ignore = dirty
[submodule "Vulkan/glslang"]
path = Vulkan/glslang
url = https://github.com/KhronosGroup/glslang.git


@ -62,6 +62,19 @@ DEFINE_IID(IXAudio2, 8bcf1f58, 9fe7, 4583, 8a, c6, e2, ad, c4, 65, c8, bb);
#include <objbase.h> // Windows COM declarations
#endif
#ifndef _MSC_VER
#define __in
#define __in_opt
#define __in_bcount(x)
#define __in_ecount(x)
#define __deref_out
#define __out
#define __out_ecount(x)
#define __out_bcount(x)
#define __inout
#define __reserved
#endif
#include <sal.h> // Markers for documenting API semantics
#include <audiodefs.h> // Basic audio data types and constants
#include <xma2defs.h> // Data types and constants for XMA2 audio


@ -21,7 +21,13 @@
// GUID definitions by defining the INITGUID preprocessor constant or by linking
// to a GUID library. This works in either C or C++.
#ifdef __cplusplus
#ifndef _MSC_VER
#define DEFINE_UUID(name, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8) __CRT_UUID_DECL(name, 0x##l, 0x##w1, 0x##w2, 0x##b1, 0x##b2, 0x##b3, 0x##b4, 0x##b5, 0x##b6, 0x##b7, 0x##b8)
#define DEFINE_CLSID(className, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8) class className; DEFINE_UUID(className, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8)
#define DEFINE_IID(interfaceName, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8) struct interfaceName; DEFINE_UUID(interfaceName, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8)
#elif __cplusplus
#define DECLSPEC_UUID_WRAPPER(x) __declspec(uuid(#x))
#ifdef INITGUID


@ -17,7 +17,6 @@ set(PNG_TESTS OFF CACHE BOOL "Build tests." FORCE)
add_definitions(-DCMAKE_BUILD)
add_subdirectory( asmjit )
add_subdirectory( 3rdparty/libpng )
# TODO: do real installation, including copying directory structure
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_RELEASE "${PROJECT_BINARY_DIR}/bin")


@ -976,4 +976,17 @@ public:
{
return atomic_op(atomic_test_and_complement<type, T2>{}, rhs);
}
// Minimal pointer support (TODO: must forward operator ->())
type operator ->() const
{
return load();
}
// Minimal array support
template<typename I = std::size_t>
auto operator [](const I& index) const -> decltype(std::declval<const type>()[std::declval<I>()])
{
return load()[index];
}
};
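The new operators only make the pointer load itself atomic; whatever is reached through the result is ordinary non-atomic access. A minimal sketch of what they enable (the counters struct, g_storage and g_name are hypothetical; assumes the Utilities/Atomic.h shown here is on the include path):

#include "Utilities/Atomic.h"

struct counters
{
	int hits;
};

counters g_storage{};
const char g_name[] = "rpcs3";

atomic_t<counters*> g_counters{ &g_storage }; // atomic pointer to plain data
atomic_t<const char*> g_str{ g_name };

void example()
{
	// operator ->() loads the pointer atomically, then member access chains as usual
	g_counters->hits += 1; // only the pointer load is atomic, not the increment

	// operator [](i) loads the stored value and indexes the result
	char first = g_str[3]; // 's'
	(void)first;
}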

Utilities/AtomicPtr.h (new file)

@ -0,0 +1,150 @@
#pragma once
#include "Atomic.h"
#include <memory>
// Unfinished. Only std::default_delete will work as expected.
template<typename T, typename D>
class atomic_ptr_base : D
{
protected:
atomic_t<T*> m_ptr;
constexpr atomic_ptr_base(T* ptr)
: m_ptr(ptr)
{
}
public:
~atomic_ptr_base()
{
if (m_ptr)
{
(*this)(m_ptr.load());
}
}
D& get_deleter()
{
return *this;
}
const D& get_deleter() const
{
return *this;
}
};
// Simple atomic pointer with unique ownership. Draft, unfinished.
template<typename T, typename D = std::default_delete<T>>
class atomic_ptr final : atomic_ptr_base<T, D>
{
using base = atomic_ptr_base<T, D>;
static_assert(sizeof(T*) == sizeof(base), "atomic_ptr<> error: invalid deleter (empty class expected)");
public:
constexpr atomic_ptr()
: base(nullptr)
{
}
constexpr atomic_ptr(std::nullptr_t)
: base(nullptr)
{
}
explicit atomic_ptr(T* ptr)
: base(ptr)
{
}
template<typename T2, typename = std::enable_if_t<std::is_convertible<T2, T>::value>>
atomic_ptr(std::unique_ptr<T2, D>&& ptr)
: base(ptr.release())
{
}
atomic_ptr& operator =(std::nullptr_t)
{
if (T* old = base::m_ptr.exchange(nullptr))
{
this->get_deleter()(old);
}
return *this;
}
template<typename T2, typename = std::enable_if_t<std::is_convertible<T2, T>::value>>
atomic_ptr& operator =(std::unique_ptr<T2, D>&& ptr)
{
if (T* old = base::m_ptr.exchange(ptr.release()))
{
this->get_deleter()(old);
}
return *this;
}
void swap(std::unique_ptr<T, D>& ptr)
{
ptr.reset(base::m_ptr.exchange(ptr.release()));
}
std::add_lvalue_reference_t<T> operator *() const
{
return *base::m_ptr;
}
T* operator ->() const
{
return base::m_ptr;
}
T* get() const
{
return base::m_ptr;
}
explicit operator bool() const
{
return base::m_ptr != nullptr;
}
T* release() const
{
return base::m_ptr.exchange(0);
}
void reset(T* ptr = nullptr)
{
if (T* old = base::m_ptr.exchange(ptr))
{
this->get_deleter()(old);
}
}
// Steal the pointer from `ptr`, convert old value to unique_ptr
std::unique_ptr<T, D> exchange(std::unique_ptr<T, D>&& ptr)
{
return std::unique_ptr<T, D>(base::m_ptr.exchange(ptr.release()));
}
// If pointer is null, steal it from `ptr`
bool test_and_swap(std::unique_ptr<T, D>&& ptr)
{
if (base::m_ptr.compare_and_swap_test(nullptr, ptr.get()))
{
ptr.release();
return true;
}
return false;
}
};
template<typename T, typename D>
class atomic_ptr<T[], D> final : atomic_ptr_base<T[], D>
{
// TODO
};
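A hedged sketch of how the draft class might be used to publish a lazily created object exactly once (the config type and get_config() helper are hypothetical, not part of this commit):

#include "Utilities/AtomicPtr.h"
#include <memory>

struct config
{
	int value = 42;
};

atomic_ptr<config> g_cfg; // starts null

const config& get_config()
{
	if (!g_cfg)
	{
		// Several threads may race here; test_and_swap() installs the first
		// candidate, and a loser's unique_ptr deletes its own copy on return.
		g_cfg.test_and_swap(std::make_unique<config>());
	}

	return *g_cfg;
}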


@ -910,20 +910,15 @@ template<typename T> using atomic_be_t = atomic_t<be_t<T>>;
template<typename T> using atomic_le_t = atomic_t<le_t<T>>;
#endif
namespace fmt
// Formatting for BE/LE data
template<typename T, bool Se>
struct unveil<se_t<T, Se>, void>
{
// Formatting for BE/LE data
template<typename T, bool Se>
struct unveil<se_t<T, Se>, void>
static inline auto get(const se_t<T, Se>& arg)
{
using result_type = typename unveil<T>::result_type;
static inline result_type get_value(const se_t<T, Se>& arg)
{
return unveil<T>::get_value(arg);
}
};
}
return unveil<T>::get(arg);
}
};
#undef IS_BINARY_COMPARABLE
#undef IS_INTEGER


@ -1,8 +1,17 @@
#pragma once
#include "Utilities/types.h"
#include "Utilities/Atomic.h"
#include <initializer_list>
#include <exception>
#include <utility>
#include <string>
#include <vector>
#include <set>
#include <unordered_map>
#include <map>
#include <mutex>
namespace cfg
{


@ -2,13 +2,13 @@
#include "StrFmt.h"
#include "Macro.h"
#include "SharedMutex.h"
#include <unordered_map>
#include <algorithm>
#ifdef _WIN32
#include <cwchar>
#undef _WIN32_WINNT
#define _WIN32_WINNT 0x0601
#include <Windows.h>
static std::unique_ptr<wchar_t[]> to_wchar(const std::string& source)
@ -97,7 +97,7 @@ namespace fs
std::unordered_map<std::string, std::shared_ptr<device_base>> m_map;
public:
std::shared_ptr<device_base> get_device(const std::string& name);
std::shared_ptr<device_base> get_device(const std::string& path);
std::shared_ptr<device_base> set_device(const std::string& name, const std::shared_ptr<device_base>&);
};
@ -109,11 +109,11 @@ namespace fs
}
}
safe_buffers std::shared_ptr<fs::device_base> fs::device_manager::get_device(const std::string& name)
std::shared_ptr<fs::device_base> fs::device_manager::get_device(const std::string& path)
{
reader_lock lock(m_mutex);
const auto found = m_map.find(name);
const auto found = m_map.find(path.substr(0, path.find_first_of('/', 2)));
if (found == m_map.end())
{
@ -123,29 +123,27 @@ safe_buffers std::shared_ptr<fs::device_base> fs::device_manager::get_device(con
return found->second;
}
safe_buffers std::shared_ptr<fs::device_base> fs::device_manager::set_device(const std::string& name, const std::shared_ptr<device_base>& device)
std::shared_ptr<fs::device_base> fs::device_manager::set_device(const std::string& name, const std::shared_ptr<device_base>& device)
{
std::lock_guard<shared_mutex> lock(m_mutex);
writer_lock lock(m_mutex);
return m_map[name] = device;
}
safe_buffers std::shared_ptr<fs::device_base> fs::get_virtual_device(const std::string& path)
std::shared_ptr<fs::device_base> fs::get_virtual_device(const std::string& path)
{
// Every virtual device path must have "//" at the beginning
if (path.size() > 2 && reinterpret_cast<const u16&>(path.front()) == '//')
if (path.size() > 2 && reinterpret_cast<const u16&>(path.front()) == "//"_u16)
{
return get_device_manager().get_device(path.substr(0, path.find_first_of('/', 2)));
return get_device_manager().get_device(path);
}
return nullptr;
}
safe_buffers std::shared_ptr<fs::device_base> fs::set_virtual_device(const std::string& name, const std::shared_ptr<device_base>& device)
std::shared_ptr<fs::device_base> fs::set_virtual_device(const std::string& name, const std::shared_ptr<device_base>& device)
{
Expects(name.size() > 2);
Expects(name[0] == '/');
Expects(name[1] == '/');
Expects(name.size() > 2 && name[0] == '/' && name[1] == '/' && name.find('/', 2) == -1);
return get_device_manager().set_device(name, device);
}
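In practice a virtual device is registered under an exact "//name" prefix and later resolved from any path carrying that prefix, since get_device() now extracts the prefix itself. A hedged sketch (the ram_device argument stands for some fs::device_base implementation, which is outside this hunk; the include path is assumed):

#include "Utilities/File.h" // assumed header for the fs:: namespace
#include <memory>

void register_ram_device(const std::shared_ptr<fs::device_base>& ram_device)
{
	// Device names must be exactly "//name": longer than 2 chars, starting with
	// "//" and containing no further '/' (enforced by the Expects() above).
	fs::set_virtual_device("//ram", ram_device);

	// get_virtual_device() receives the full path; the manager extracts the
	// "//ram" prefix via path.find_first_of('/', 2).
	auto dev = fs::get_virtual_device("//ram/cache/file.bin"); // -> the "//ram" device
	auto none = fs::get_virtual_device("/dev_hdd0/game");      // no "//" prefix -> nullptr
}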
@ -1005,17 +1003,17 @@ bool fs::file::open(const std::string& path, mset<open_mode> mode)
fs::file::file(const void* ptr, std::size_t size)
{
class memory_stream final : public file_base
class memory_stream : public file_base
{
u64 m_pos = 0;
u64 m_size;
u64 m_pos{}; // TODO: read/seek could modify m_pos atomically
const char* const m_ptr;
const u64 m_size;
public:
const char* const ptr;
memory_stream(const void* ptr, std::size_t size)
: m_size(size)
, ptr(static_cast<const char*>(ptr))
memory_stream(const void* ptr, u64 size)
: m_ptr(static_cast<const char*>(ptr))
, m_size(size)
{
}
@ -1034,7 +1032,7 @@ fs::file::file(const void* ptr, std::size_t size)
const u64 start = m_pos;
const u64 end = seek(count, fs::seek_cur);
const u64 read_size = end >= start ? end - start : throw std::logic_error("memory_stream::read(): overflow");
std::memcpy(buffer, ptr + start, read_size);
std::memcpy(buffer, m_ptr + start, read_size);
return read_size;
}
@ -1045,10 +1043,10 @@ fs::file::file(const void* ptr, std::size_t size)
u64 seek(s64 offset, fs::seek_mode whence) override
{
return m_pos =
whence == fs::seek_set ? std::min<u64>(offset, m_size) :
whence == fs::seek_cur ? std::min<u64>(offset + m_pos, m_size) :
whence == fs::seek_end ? std::min<u64>(offset + m_size, m_size) :
return
whence == fs::seek_set ? m_pos = std::min<u64>(offset, m_size) :
whence == fs::seek_cur ? m_pos = std::min<u64>(offset + m_pos, m_size) :
whence == fs::seek_end ? m_pos = std::min<u64>(offset + m_size, m_size) :
throw std::logic_error("memory_stream::seek(): invalid whence");
}
@ -1061,93 +1059,6 @@ fs::file::file(const void* ptr, std::size_t size)
m_file = std::make_unique<memory_stream>(ptr, size);
}
fs::file::file(std::vector<char>& vec)
{
class vector_stream final : public file_base
{
u64 m_pos = 0;
public:
std::vector<char>& vec;
vector_stream(std::vector<char>& vec)
: vec(vec)
{
}
fs::stat_t stat() override
{
throw std::logic_error("vector_stream doesn't support stat()");
}
bool trunc(u64 length) override
{
vec.resize(length);
return true;
}
u64 read(void* buffer, u64 count) override
{
const u64 start = m_pos;
const u64 end = seek(count, fs::seek_cur);
const u64 read_size = end >= start ? end - start : throw std::logic_error("vector_stream::read(): overflow");
std::memcpy(buffer, vec.data() + start, read_size);
return read_size;
}
u64 write(const void* buffer, u64 count) override
{
throw std::logic_error("TODO: vector_stream doesn't support write()");
}
u64 seek(s64 offset, fs::seek_mode whence) override
{
return m_pos =
whence == fs::seek_set ? std::min<u64>(offset, vec.size()) :
whence == fs::seek_cur ? std::min<u64>(offset + m_pos, vec.size()) :
whence == fs::seek_end ? std::min<u64>(offset + vec.size(), vec.size()) :
throw std::logic_error("vector_stream::seek(): invalid whence");
}
u64 size() override
{
return vec.size();
}
};
m_file = std::make_unique<vector_stream>(vec);
}
//void fs::file_read_map::reset(const file& f)
//{
// reset();
//
// if (f)
// {
//#ifdef _WIN32
// const HANDLE handle = ::CreateFileMapping((HANDLE)f.m_fd, NULL, PAGE_READONLY, 0, 0, NULL);
// m_ptr = (char*)::MapViewOfFile(handle, FILE_MAP_READ, 0, 0, 0);
// m_size = f.size();
// ::CloseHandle(handle);
//#else
// m_ptr = (char*)::mmap(nullptr, m_size = f.size(), PROT_READ, MAP_SHARED, f.m_fd, 0);
// if (m_ptr == (void*)-1) m_ptr = nullptr;
//#endif
// }
//}
//
//void fs::file_read_map::reset()
//{
// if (m_ptr)
// {
//#ifdef _WIN32
// ::UnmapViewOfFile(m_ptr);
//#else
// ::munmap(m_ptr, m_size);
//#endif
// }
//}
void fs::dir::xnull() const
{
throw std::logic_error("fs::dir is null");


@ -161,9 +161,6 @@ namespace fs
// Open memory for read
explicit file(const void* ptr, std::size_t size);
// Open vector
explicit file(std::vector<char>& vec);
// Check whether the handle is valid (opened file)
explicit operator bool() const
{
@ -214,7 +211,7 @@ namespace fs
return m_file->write(buffer, count);
}
// Change current position, returns previous position
// Change current position, returns resulting position
u64 seek(s64 offset, seek_mode whence = seek_set) const
{
if (!m_file) xnull();


@ -1,4 +1,5 @@
#include "Log.h"
#include <cstdarg>
namespace _log
{
@ -23,9 +24,12 @@ namespace _log
thread_local std::string(*g_tls_make_prefix)(const channel&, level, const std::string&) = nullptr;
}
void _log::broadcast(const _log::channel& ch, _log::level sev, const std::string& text)
void _log::channel::broadcast(const _log::channel& ch, _log::level sev, const char* fmt...)
{
get_logger().log(ch, sev, text);
va_list args;
va_start(args, fmt);
get_logger().log(ch, sev, fmt::_vformat(fmt, args));
va_end(args);
}
[[noreturn]] extern void catch_all_exceptions();


@ -22,9 +22,6 @@ namespace _log
struct channel;
struct listener;
// Send log message to global logger instance
void broadcast(const channel& ch, level sev, const std::string& text);
// Log channel
struct channel
{
@ -42,23 +39,23 @@ namespace _log
}
// Log without formatting
force_inline void log(level sev, const std::string& text) const
void log(level sev, const std::string& text) const
{
if (sev <= enabled)
broadcast(*this, sev, text);
broadcast(*this, sev, "%s", text.c_str());
}
// Log with formatting
template<typename... Args>
force_inline safe_buffers void format(level sev, const char* fmt, const Args&... args) const
void format(level sev, const char* fmt, const Args&... args) const
{
if (sev <= enabled)
broadcast(*this, sev, fmt::format(fmt, fmt::do_unveil(args)...));
broadcast(*this, sev, fmt, ::unveil<Args>::get(args)...);
}
#define GEN_LOG_METHOD(_sev)\
template<typename... Args>\
force_inline void _sev(const char* fmt, const Args&... args) const\
void _sev(const char* fmt, const Args&... args) const\
{\
return format<Args...>(level::_sev, fmt, args...);\
}
@ -72,6 +69,10 @@ namespace _log
GEN_LOG_METHOD(trace)
#undef GEN_LOG_METHOD
private:
// Send log message to global logger instance
static void broadcast(const channel& ch, level sev, const char* fmt...);
};
// Log listener (destination)
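Returning to the channel methods above: formatting now happens once inside the out-of-line broadcast(), while the inline wrappers only pass unveil'ed arguments through C varargs. A minimal usage sketch (example_log and report_failure() are made up; assumes Utilities/Log.h and the LOG_CHANNEL() helper used elsewhere in this commit, e.g. in sceLibKernel.cpp):

#include "Utilities/Log.h"
#include <string>

LOG_CHANNEL(example_log); // assumed macro declaring a _log::channel

void report_failure(const std::string& path, int code)
{
	// std::string is unveil'ed to const char*, so plain printf-style specifiers
	// keep working; the severity check runs before any formatting is done.
	example_log.error("Failed to open '%s' (code=%d)", path, code);
}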


@ -13,25 +13,28 @@
#include <x86intrin.h>
#endif
#ifdef _MSC_VER
#define ASSUME(cond) __assume(cond)
#define LIKELY(cond) (cond)
#define UNLIKELY(cond) (cond)
#else
#define ASSUME(cond) do { if (!(cond)) __builtin_unreachable(); } while (0)
#define LIKELY(cond) __builtin_expect(!!(cond), 1)
#define UNLIKELY(cond) __builtin_expect(!!(cond), 0)
#endif
// Some platforms don't support thread_local well yet.
#ifndef _MSC_VER
#define thread_local __thread
#define __assume(cond) do { if (!(cond)) __builtin_unreachable(); } while (0)
#endif
#if defined(_MSC_VER)
#define safe_buffers __declspec(safebuffers)
#else
#define safe_buffers
#endif
#if defined(_MSC_VER)
#ifdef _MSC_VER
#define never_inline __declspec(noinline)
#else
#define never_inline __attribute__((noinline))
#endif
#if defined(_MSC_VER)
#ifdef _MSC_VER
#define force_inline __forceinline
#else
#define force_inline __attribute__((always_inline)) inline


@ -1,5 +1,8 @@
#pragma once
#include "types.h"
#include "Atomic.h"
class semaphore_t
{
// semaphore mutex
@ -34,4 +37,4 @@ public:
void wait();
bool post_and_wait();
};
};

Utilities/SharedMutex.cpp (new file)

@ -0,0 +1,157 @@
#include "SharedMutex.h"
#include <mutex>
#include <condition_variable>
struct shared_mutex::internal
{
std::mutex mutex;
std::size_t rq_size{}; // Reader queue size (threads waiting on m_rcv)
std::size_t wq_size{}; // Writer queue size (threads waiting on m_wcv and m_ocv)
std::condition_variable rcv; // Reader queue
std::condition_variable wcv; // Writer queue
std::condition_variable ocv; // For current exclusive owner
};
shared_mutex::~shared_mutex()
{
delete m_data;
}
void shared_mutex::initialize_once()
{
if (!m_data)
{
auto ptr = new shared_mutex::internal;
if (!m_data.compare_and_swap_test(nullptr, ptr))
{
delete ptr;
}
}
}
void shared_mutex::lock_shared_hard()
{
initialize_once();
std::unique_lock<std::mutex> lock(m_data->mutex);
// Validate
if ((m_ctrl & SM_INVALID_BIT) != 0) throw std::runtime_error("shared_mutex::lock_shared(): Invalid bit");
if ((m_ctrl & SM_READER_MASK) == 0) throw std::runtime_error("shared_mutex::lock_shared(): No readers");
// Notify non-zero reader queue size
m_ctrl |= SM_WAITERS_BIT, m_data->rq_size++;
// Fix excess reader count
if ((--m_ctrl & SM_READER_MASK) == 0 && m_data->wq_size)
{
// Notify exclusive owner
m_data->ocv.notify_one();
}
// Obtain the reader lock
while (true)
{
const auto ctrl = m_ctrl.load();
// Check writers and reader limit
if (m_data->wq_size || (ctrl & ~SM_WAITERS_BIT) >= SM_READER_MAX)
{
m_data->rcv.wait(lock);
continue;
}
if (m_ctrl.compare_and_swap_test(ctrl, ctrl + 1))
{
break;
}
}
if (!--m_data->rq_size && !m_data->wq_size)
{
m_ctrl &= ~SM_WAITERS_BIT;
}
}
void shared_mutex::unlock_shared_notify()
{
initialize_once();
// Mutex is locked for reliable notification because m_ctrl has been changed outside
std::lock_guard<std::mutex> lock(m_data->mutex);
if ((m_ctrl & SM_READER_MASK) == 0 && m_data->wq_size)
{
// Notify exclusive owner
m_data->ocv.notify_one();
}
else if (m_data->rq_size)
{
// Notify other readers
m_data->rcv.notify_one();
}
}
void shared_mutex::lock_hard()
{
initialize_once();
std::unique_lock<std::mutex> lock(m_data->mutex);
// Validate
if ((m_ctrl & SM_INVALID_BIT) != 0) throw std::runtime_error("shared_mutex::lock(): Invalid bit");
// Notify non-zero writer queue size
m_ctrl |= SM_WAITERS_BIT, m_data->wq_size++;
// Obtain the writer lock
while (true)
{
const auto ctrl = m_ctrl.load();
if (ctrl & SM_WRITER_LOCK)
{
m_data->wcv.wait(lock);
continue;
}
if (m_ctrl.compare_and_swap_test(ctrl, ctrl | SM_WRITER_LOCK))
{
break;
}
}
// Wait for remaining readers
while ((m_ctrl & SM_READER_MASK) != 0)
{
m_data->ocv.wait(lock);
}
if (!--m_data->wq_size && !m_data->rq_size)
{
m_ctrl &= ~SM_WAITERS_BIT;
}
}
void shared_mutex::unlock_notify()
{
initialize_once();
// Mutex is locked for reliable notification because m_ctrl has been changed outside
std::lock_guard<std::mutex> lock(m_data->mutex);
if (m_data->wq_size)
{
// Notify next exclusive owner
m_data->wcv.notify_one();
}
else if (m_data->rq_size)
{
// Notify all readers
m_data->rcv.notify_all();
}
}
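initialize_once() implements lazy one-shot initialization: the heavy mutex/condition-variable block is only allocated when a slow path is actually hit, and it is published with a single compare-and-swap, so racing threads simply discard their copy. The same idiom reappears in thread_ctrl later in this commit. A generic illustration of the idiom using standard atomics (illustration only, not the RPCS3 atomic_t API):

#include <atomic>

struct heavy_state { /* mutexes, condition variables, queues... */ };

std::atomic<heavy_state*> g_state{ nullptr };

heavy_state* get_state()
{
	heavy_state* p = g_state.load(std::memory_order_acquire);

	if (!p)
	{
		heavy_state* fresh = new heavy_state;
		heavy_state* expected = nullptr;

		// Only one thread wins the CAS; the rest delete their allocation
		// and use the published pointer instead.
		if (g_state.compare_exchange_strong(expected, fresh, std::memory_order_acq_rel))
		{
			p = fresh;
		}
		else
		{
			delete fresh;
			p = expected;
		}
	}

	return p;
}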


@ -1,22 +1,16 @@
#pragma once
#include <cstdint>
#include <exception>
#include <thread>
#include <mutex>
#include <condition_variable>
#include "types.h"
#include "Atomic.h"
#include "Platform.h"
//! An attempt to create effective implementation of "shared mutex", lock-free in optimistic case.
//! All locking and unlocking may be done by single LOCK XADD or LOCK CMPXCHG instructions.
//! All locking and unlocking may be done by a single LOCK XADD or LOCK CMPXCHG instruction.
//! MSVC implementation of std::shared_timed_mutex seems suboptimal.
//! std::shared_mutex is not available until C++17.
class shared_mutex final
{
using ctrl_type = u32;
enum : ctrl_type
enum : u32
{
SM_WRITER_LOCK = 1u << 31, // Exclusive lock flag, must be MSB
SM_WAITERS_BIT = 1u << 30, // Flag set if m_wq_size or m_rq_size is non-zero
@ -26,186 +20,79 @@ class shared_mutex final
SM_READER_MAX = 1u << 24, // Max reader count
};
atomic_t<ctrl_type> m_ctrl{}; // Control atomic variable: reader count | SM_* flags
atomic_t<u32> m_ctrl{}; // Control variable: reader count | SM_* flags
std::mutex m_mutex;
struct internal;
std::size_t m_rq_size{}; // Reader queue size (threads waiting on m_rcv)
std::size_t m_wq_size{}; // Writer queue size (threads waiting on m_wcv and m_ocv)
std::condition_variable m_rcv; // Reader queue
std::condition_variable m_wcv; // Writer queue
std::condition_variable m_ocv; // For current exclusive owner
atomic_t<internal*> m_data{}; // Internal data
void lock_shared_hard()
{
std::unique_lock<std::mutex> lock(m_mutex);
void lock_shared_hard();
void unlock_shared_notify();
// Validate
if ((m_ctrl & SM_INVALID_BIT) != 0) throw std::runtime_error("shared_mutex::lock_shared(): Invalid bit");
if ((m_ctrl & SM_READER_MASK) == 0) throw std::runtime_error("shared_mutex::lock_shared(): No readers");
// Notify non-zero reader queue size
m_ctrl |= SM_WAITERS_BIT, m_rq_size++;
// Fix excess reader count
if ((--m_ctrl & SM_READER_MASK) == 0 && m_wq_size)
{
// Notify exclusive owner
m_ocv.notify_one();
}
// Obtain the reader lock
while (true)
{
const auto ctrl = m_ctrl.load();
// Check writers and reader limit
if (m_wq_size || (ctrl & ~SM_WAITERS_BIT) >= SM_READER_MAX)
{
m_rcv.wait(lock);
continue;
}
if (m_ctrl.compare_and_swap_test(ctrl, ctrl + 1))
{
break;
}
}
if (!--m_rq_size && !m_wq_size)
{
m_ctrl &= ~SM_WAITERS_BIT;
}
}
void unlock_shared_notify()
{
// Mutex is locked for reliable notification because m_ctrl has been changed outside
std::lock_guard<std::mutex> lock(m_mutex);
if ((m_ctrl & SM_READER_MASK) == 0 && m_wq_size)
{
// Notify exclusive owner
m_ocv.notify_one();
}
else if (m_rq_size)
{
// Notify other readers
m_rcv.notify_one();
}
}
void lock_hard()
{
std::unique_lock<std::mutex> lock(m_mutex);
// Validate
if ((m_ctrl & SM_INVALID_BIT) != 0) throw std::runtime_error("shared_mutex::lock(): Invalid bit");
// Notify non-zero writer queue size
m_ctrl |= SM_WAITERS_BIT, m_wq_size++;
// Obtain the writer lock
while (true)
{
const auto ctrl = m_ctrl.load();
if (ctrl & SM_WRITER_LOCK)
{
m_wcv.wait(lock);
continue;
}
if (m_ctrl.compare_and_swap_test(ctrl, ctrl | SM_WRITER_LOCK))
{
break;
}
}
// Wait for remaining readers
while ((m_ctrl & SM_READER_MASK) != 0)
{
m_ocv.wait(lock);
}
if (!--m_wq_size && !m_rq_size)
{
m_ctrl &= ~SM_WAITERS_BIT;
}
}
void unlock_notify()
{
// Mutex is locked for reliable notification because m_ctrl has been changed outside
std::lock_guard<std::mutex> lock(m_mutex);
if (m_wq_size)
{
// Notify next exclusive owner
m_wcv.notify_one();
}
else if (m_rq_size)
{
// Notify all readers
m_rcv.notify_all();
}
}
void lock_hard();
void unlock_notify();
public:
shared_mutex() = default;
constexpr shared_mutex() = default;
~shared_mutex();
// Initialize internal data
void initialize_once();
bool try_lock_shared()
{
auto ctrl = m_ctrl.load();
if (UNLIKELY(ctrl >= SM_READER_MAX))
{
ctrl = 0;
}
// Weak attempt
return LIKELY(m_ctrl.compare_and_swap_test(ctrl, ctrl + 1));
}
// Lock in shared mode
void lock_shared()
{
if (m_ctrl++ >= SM_READER_MAX)
// Optimization: unconditional increment, compensated later
if (UNLIKELY(m_ctrl++ >= SM_READER_MAX))
{
lock_shared_hard();
}
}
// Try to lock in shared mode
bool try_lock_shared()
{
auto ctrl = m_ctrl.load();
return ctrl < SM_READER_MAX && m_ctrl.compare_and_swap_test(ctrl, ctrl + 1);
}
// Unlock in shared mode
void unlock_shared()
{
if (m_ctrl-- >= SM_READER_MAX)
if (UNLIKELY(m_ctrl-- >= SM_READER_MAX))
{
unlock_shared_notify();
}
}
// Try to lock exclusively
bool try_lock()
{
return m_ctrl.compare_and_swap_test(0, SM_WRITER_LOCK);
return LIKELY(m_ctrl.compare_and_swap_test(0, SM_WRITER_LOCK));
}
// Lock exclusively
void lock()
{
if (m_ctrl.compare_and_swap_test(0, SM_WRITER_LOCK)) return;
lock_hard();
if (UNLIKELY(!try_lock()))
{
lock_hard();
}
}
// Unlock exclusively
void unlock()
{
if (m_ctrl.fetch_sub(SM_WRITER_LOCK) != SM_WRITER_LOCK)
if (UNLIKELY(m_ctrl.fetch_sub(SM_WRITER_LOCK) != SM_WRITER_LOCK))
{
unlock_notify();
}
}
};
//! Simplified shared (reader) lock implementation, similar to std::lock_guard.
//! Simplified shared (reader) lock implementation.
//! std::shared_lock may be used instead if necessary.
class reader_lock final
{
@ -225,3 +112,24 @@ public:
m_mutex.unlock_shared();
}
};
//! Simplified exclusive (writer) lock implementation.
//! std::lock_guard may or std::unique_lock be used instead if necessary.
class writer_lock final
{
shared_mutex& m_mutex;
public:
writer_lock(const writer_lock&) = delete;
writer_lock(shared_mutex& mutex)
: m_mutex(mutex)
{
m_mutex.lock();
}
~writer_lock()
{
m_mutex.unlock();
}
};
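A short sketch of the intended usage pattern with the scoped guards, pairing the optimistic fast paths above with RAII locking (the registry data is hypothetical; assumes Utilities/SharedMutex.h):

#include "Utilities/SharedMutex.h"
#include <map>
#include <string>

shared_mutex g_mutex;
std::map<std::string, int> g_registry; // hypothetical shared data

int lookup(const std::string& key)
{
	reader_lock lock(g_mutex); // shared: many readers may hold it concurrently

	const auto found = g_registry.find(key);
	return found == g_registry.end() ? 0 : found->second;
}

void store(const std::string& key, int value)
{
	writer_lock lock(g_mutex); // exclusive: waits for readers and other writers

	g_registry[key] = value;
}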


@ -1,6 +1,10 @@
#include "StrFmt.h"
#include "BEType.h"
#include <cassert>
#include <array>
#include <memory>
std::string v128::to_hex() const
{
return fmt::format("%016llx%016llx", _u64[1], _u64[0]);
@ -74,6 +78,59 @@ std::string fmt::to_sdec(s64 svalue)
return std::string(&res[first], sizeof(res) - first);
}
std::string fmt::_vformat(const char* fmt, va_list _args) noexcept
{
// Fixed stack buffer for the first attempt
std::array<char, 4096> fixed_buf;
// Possibly dynamically allocated buffer for the second attempt
std::unique_ptr<char[]> buf;
// Pointer to the current buffer
char* buf_addr = fixed_buf.data();
for (std::size_t buf_size = fixed_buf.size();;)
{
va_list args;
va_copy(args, _args);
#ifndef _MSC_VER
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wformat-security"
#endif
const std::size_t len = std::vsnprintf(buf_addr, buf_size, fmt, args);
#ifndef _MSC_VER
#pragma GCC diagnostic pop
#endif
va_end(args);
assert(len <= INT_MAX);
if (len < buf_size)
{
return{ buf_addr, len };
}
buf.reset(buf_addr = new char[buf_size = len + 1]);
}
}
std::string fmt::_format(const char* fmt...) noexcept
{
va_list args;
va_start(args, fmt);
auto result = fmt::_vformat(fmt, args);
va_end(args);
return result;
}
fmt::exception_base::exception_base(const char* fmt...)
: std::runtime_error((va_start(m_args, fmt), _vformat(fmt, m_args)))
{
va_end(m_args);
}
std::string fmt::replace_first(const std::string& src, const std::string& from, const std::string& to)
{
auto pos = src.find(from);


@ -1,21 +1,16 @@
#pragma once
#include <array>
#include <cstdarg>
#include <string>
#include <vector>
#include <functional>
#include <memory>
#include "Platform.h"
#include "types.h"
#if defined(_MSC_VER) && _MSC_VER <= 1800
#define snprintf _snprintf
#endif
// Copy null-terminated string from std::string to char array with truncation
template<std::size_t N>
inline void strcpy_trunc(char(&dst)[N], const std::string& src)
force_inline void strcpy_trunc(char(&dst)[N], const std::string& src)
{
const std::size_t count = src.size() >= N ? N - 1 : src.size();
std::memcpy(dst, src.c_str(), count);
@ -24,13 +19,33 @@ inline void strcpy_trunc(char(&dst)[N], const std::string& src)
// Copy null-terminated string from char array to another char array with truncation
template<std::size_t N, std::size_t N2>
inline void strcpy_trunc(char(&dst)[N], const char(&src)[N2])
force_inline void strcpy_trunc(char(&dst)[N], const char(&src)[N2])
{
const std::size_t count = N2 >= N ? N - 1 : N2;
std::memcpy(dst, src, count);
dst[count] = '\0';
}
// Formatting helper, type-specific preprocessing for improving safety and functionality
template<typename T, typename>
struct unveil
{
// TODO
static inline const T& get(const T& arg)
{
return arg;
}
};
template<>
struct unveil<std::string, void>
{
static inline const char* get(const std::string& arg)
{
return arg.c_str();
}
};
namespace fmt
{
std::string replace_first(const std::string& src, const std::string& from, const std::string& to);
@ -87,125 +102,45 @@ namespace fmt
std::string to_hex(u64 value, u64 count = 1);
std::string to_udec(u64 value);
std::string to_sdec(s64 value);
template<typename T, typename>
struct unveil
{
using result_type = T;
force_inline static result_type get_value(const T& arg)
{
return arg;
}
};
template<>
struct unveil<const char*, void>
{
using result_type = const char* const;
static result_type get_value(const char* const& arg)
{
return arg;
}
};
template<std::size_t N>
struct unveil<char[N], void>
{
using result_type = const char* const;
static result_type get_value(const char(&arg)[N])
{
return arg;
}
};
template<>
struct unveil<std::string, void>
{
using result_type = const char*;
static result_type get_value(const std::string& arg)
{
return arg.c_str();
}
};
template<typename T>
struct unveil<T, std::enable_if_t<std::is_enum<T>::value>>
{
using result_type = std::underlying_type_t<T>;
force_inline static result_type get_value(const T& arg)
{
return static_cast<result_type>(arg);
}
};
template<typename T>
force_inline typename unveil<T>::result_type do_unveil(const T& arg)
{
return unveil<T>::get_value(arg);
}
std::string _format(const char* fmt...) noexcept;
std::string _vformat(const char*, va_list) noexcept;
// Formatting function with special functionality (fmt::unveil)
template<typename... Args>
safe_buffers std::string format(const char* fmt, const Args&... args)
force_inline std::string format(const char* fmt, const Args&... args) noexcept
{
// fixed stack buffer for the first attempt
std::array<char, 4096> fixed_buf;
return _format(fmt, ::unveil<Args>::get(args)...);
}
// possibly dynamically allocated buffer for the second attempt
std::unique_ptr<char[]> buf;
// Helper class
class exception_base : public std::runtime_error
{
// Helper (there is no other room)
va_list m_args;
// pointer to the current buffer
char* buf_addr = fixed_buf.data();
protected:
// Internal formatting constructor
exception_base(const char* fmt...);
};
for (std::size_t buf_size = fixed_buf.size();;)
// Exception type derived from std::runtime_error with formatting constructor
class exception : public exception_base
{
public:
// Formatting constructor
template<typename... Args>
exception(const char* fmt, const Args&... args)
: exception_base(fmt, ::unveil<Args>::get(args)...)
{
#ifndef _MSC_VER
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wformat-security"
#endif
const std::size_t len = std::snprintf(buf_addr, buf_size, fmt, do_unveil(args)...);
#ifndef _MSC_VER
#pragma GCC diagnostic pop
#endif
if (len > INT_MAX)
{
throw std::runtime_error("std::snprintf() failed");
}
if (len < buf_size)
{
return{ buf_addr, len };
}
buf.reset(buf_addr = new char[buf_size = len + 1]);
}
}
// Create exception of type T (std::runtime_error by default) with formatting
template<typename T = std::runtime_error, typename... Args>
never_inline safe_buffers T exception(const char* fmt, const Args&... args) noexcept(noexcept(T{ fmt }))
{
return T{ format(fmt, do_unveil(args)...).c_str() };
}
// Create exception of type T (std::runtime_error by default) without formatting
template<typename T = std::runtime_error>
safe_buffers T exception(const char* msg) noexcept(noexcept(T{ msg }))
{
return T{ msg };
}
};
// Narrow cast (similar to gsl::narrow) with exception message formatting
template<typename To, typename From, typename... Args>
inline auto narrow(const char* format_str, const From& value, const Args&... args) -> decltype(static_cast<To>(static_cast<From>(std::declval<To>())))
{
const auto result = static_cast<To>(value);
if (static_cast<From>(result) != value) throw fmt::exception(format_str, fmt::do_unveil(value), fmt::do_unveil(args)...);
if (static_cast<From>(result) != value) throw fmt::exception(format_str, value, args...);
return result;
}
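For example, the reworked helper can be used like this (the to_offset() wrapper is hypothetical; assumes Utilities/StrFmt.h, which pulls in the fixed-size integer aliases from types.h):

#include "Utilities/StrFmt.h"
#include <string>

u32 to_offset(u64 file_size, const std::string& name)
{
	// The narrowed value is the first format argument; extra arguments follow.
	// On truncation this throws fmt::exception with the formatted message.
	return narrow<u32>("Size 0x%llx of '%s' does not fit in u32", file_size, name);
}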


@ -52,38 +52,6 @@ static void report_fatal_error(const std::string& msg)
std::abort();
}
void SetCurrentThreadDebugName(const char* threadName)
{
#if defined(_MSC_VER) // this is VS-specific way to set thread names for the debugger
#pragma pack(push,8)
struct THREADNAME_INFO
{
DWORD dwType;
LPCSTR szName;
DWORD dwThreadID;
DWORD dwFlags;
} info;
#pragma pack(pop)
info.dwType = 0x1000;
info.szName = threadName;
info.dwThreadID = -1;
info.dwFlags = 0;
__try
{
RaiseException(0x406D1388, 0, sizeof(info) / sizeof(ULONG_PTR), (ULONG_PTR*)&info);
}
__except (EXCEPTION_EXECUTE_HANDLER)
{
}
#endif
}
enum x64_reg_t : u32
{
X64R_RAX = 0,
@ -1295,34 +1263,204 @@ const bool s_self_test = []() -> bool
return true;
}();
#include <mutex>
#include <condition_variable>
#include <exception>
thread_local DECLARE(thread_ctrl::g_tls_this_thread) = nullptr;
struct thread_ctrl::internal
{
std::mutex mutex;
std::condition_variable cond;
std::condition_variable join; // Allows simultaneous joining
task_stack atexit;
std::exception_ptr exception; // Caught exception
};
// Temporarily until better interface is implemented
extern std::condition_variable& get_current_thread_cv()
{
return thread_ctrl::get_current()->get_data()->cond;
}
extern std::mutex& get_current_thread_mutex()
{
return thread_ctrl::get_current()->get_data()->mutex;
}
// TODO
atomic_t<u32> g_thread_count{ 0 };
extern atomic_t<u32> g_thread_count(0);
void thread_ctrl::initialize()
{
SetCurrentThreadDebugName(g_tls_this_thread->m_name.c_str());
// Initialize TLS variable
g_tls_this_thread = this;
#if defined(_MSC_VER)
struct THREADNAME_INFO
{
DWORD dwType;
LPCSTR szName;
DWORD dwThreadID;
DWORD dwFlags;
};
// Set thread name for VS debugger
if (IsDebuggerPresent())
{
THREADNAME_INFO info;
info.dwType = 0x1000;
info.szName = m_name.c_str();
info.dwThreadID = -1;
info.dwFlags = 0;
__try
{
RaiseException(0x406D1388, 0, sizeof(info) / sizeof(ULONG_PTR), (ULONG_PTR*)&info);
}
__except (EXCEPTION_EXECUTE_HANDLER)
{
}
}
#endif
_log::g_tls_make_prefix = [](const auto&, auto, const auto&)
{
return g_tls_this_thread->m_name;
};
// TODO
++g_thread_count;
}
void thread_ctrl::set_exception() noexcept
{
initialize_once();
m_data->exception = std::current_exception();
}
void thread_ctrl::finalize() noexcept
{
// TODO
vm::reservation_free();
// TODO
// Call atexit functions
if (m_data) m_data->atexit.exec();
--g_thread_count;
// Call atexit functions
g_tls_this_thread->m_atexit.exec();
#ifdef _MSC_VER
ULONG64 time;
QueryThreadCycleTime(m_thread.native_handle(), &time);
LOG_NOTICE(GENERAL, "Thread time: %f Gc", time / 1000000000.);
#endif
}
task_stack& thread_ctrl::get_atexit() const
{
initialize_once();
return m_data->atexit;
}
thread_ctrl::~thread_ctrl()
{
if (m_thread.joinable())
{
m_thread.detach();
}
delete m_data;
}
void thread_ctrl::initialize_once() const
{
if (!m_data)
{
auto ptr = new thread_ctrl::internal;
if (!m_data.compare_and_swap_test(nullptr, ptr))
{
delete ptr;
}
}
}
void thread_ctrl::join()
{
if (m_thread.joinable())
{
// Increase contention counter
if (m_joining++)
{
// Hard way
initialize_once();
std::unique_lock<std::mutex> lock(m_data->mutex);
m_data->join.wait(lock, WRAP_EXPR(!m_thread.joinable()));
}
else
{
// Winner joins the thread
m_thread.join();
// Notify others if necessary
if (m_joining > 1)
{
initialize_once();
// Serialize for reliable notification
m_data->mutex.lock();
m_data->mutex.unlock();
m_data->join.notify_all();
}
}
}
if (m_data && m_data->exception)
{
std::rethrow_exception(m_data->exception);
}
}
void thread_ctrl::lock_notify() const
{
if (UNLIKELY(g_tls_this_thread == this))
{
return;
}
initialize_once();
// Serialize for reliable notification, condition is assumed to be changed externally
m_data->mutex.lock();
m_data->mutex.unlock();
m_data->cond.notify_one();
}
void thread_ctrl::notify() const
{
initialize_once();
m_data->cond.notify_one();
}
thread_ctrl::internal* thread_ctrl::get_data() const
{
initialize_once();
return m_data;
}
named_thread::named_thread()
{
}
named_thread::~named_thread()
{
LOG_TRACE(GENERAL, "%s", __func__);
}
std::string named_thread::get_name() const
@ -1332,8 +1470,6 @@ std::string named_thread::get_name() const
void named_thread::start()
{
Expects(!m_thread);
// Get shared_ptr instance (will throw if called from the constructor or the object has been created incorrectly)
auto&& ptr = shared_from_this();
@ -1359,19 +1495,3 @@ void named_thread::start()
thread->on_exit();
});
}
void named_thread::join()
{
Expects(m_thread);
try
{
m_thread->join();
m_thread.reset();
}
catch (...)
{
m_thread.reset();
throw;
}
}


@ -1,13 +1,11 @@
#pragma once
#include <exception>
#include <string>
#include <memory>
#include <thread>
#include <mutex>
#include <condition_variable>
#include "Platform.h"
#include "Atomic.h"
// Will report exception and call std::abort() if put in catch(...)
[[noreturn]] void catch_all_exceptions();
@ -32,12 +30,6 @@ class task_stack
std::unique_ptr<task_base> m_stack;
never_inline void push(std::unique_ptr<task_base> task)
{
m_stack.swap(task->next);
m_stack.swap(task);
}
public:
template<typename F>
void push(F&& func)
@ -58,7 +50,16 @@ public:
}
};
return push(std::unique_ptr<task_base>{ new task_t(std::forward<F>(func)) });
auto _top = new task_t(std::forward<F>(func));
auto _next = m_stack.release();
m_stack.reset(_top);
#ifndef _MSC_VER
_top->next.reset(_next);
#else
auto& next = _top->next;
next.release();
next.reset(_next);
#endif
}
void reset()
@ -78,25 +79,33 @@ public:
// Thread control class
class thread_ctrl final
{
struct internal;
static thread_local thread_ctrl* g_tls_this_thread;
// Thread handle
std::thread m_thread;
// Thread join contention counter
atomic_t<uint> m_joining{};
// Fixed name
std::string m_name;
// Thread handle (be careful)
std::thread m_thread;
// Thread result (exception)
std::exception_ptr m_exception;
// Functions scheduled at thread exit
task_stack m_atexit;
// Thread internals
mutable atomic_t<internal*> m_data{};
// Called at the thread start
static void initialize();
void initialize();
// Set std::current_exception
void set_exception() noexcept;
// Called at the thread end
static void finalize() noexcept;
void finalize() noexcept;
// Get atexit function
task_stack& get_atexit() const;
public:
template<typename N>
@ -108,13 +117,7 @@ public:
// Disable copy/move constructors and operators
thread_ctrl(const thread_ctrl&) = delete;
~thread_ctrl()
{
if (m_thread.joinable())
{
m_thread.detach();
}
}
~thread_ctrl();
// Get thread name
const std::string& get_name() const
@ -122,19 +125,20 @@ public:
return m_name;
}
// Get thread result (may throw)
void join()
{
if (m_thread.joinable())
{
m_thread.join();
}
// Initialize internal data
void initialize_once() const;
if (auto&& e = std::move(m_exception))
{
std::rethrow_exception(e);
}
}
// Get thread result (may throw, simultaneous joining allowed)
void join();
// Lock, unlock, notify the thread (required if the condition changed locklessly)
void lock_notify() const;
// Notify the thread, beware the condition change
void notify() const;
//
internal* get_data() const;
// Get current thread (may be nullptr)
static const thread_ctrl* get_current()
@ -146,34 +150,30 @@ public:
template<typename F>
static inline void at_exit(F&& func)
{
return g_tls_this_thread->m_atexit.push(std::forward<F>(func));
return g_tls_this_thread->get_atexit().push(std::forward<F>(func));
}
// Named thread factory
template<typename N, typename F>
static inline std::shared_ptr<thread_ctrl> spawn(N&& name, F&& func)
template<typename N, typename F, typename... Args>
static inline std::shared_ptr<thread_ctrl> spawn(N&& name, F&& func, Args&&... args)
{
auto ctrl = std::make_shared<thread_ctrl>(std::forward<N>(name));
ctrl->m_thread = std::thread([ctrl, task = std::forward<F>(func)]()
ctrl->m_thread = std::thread([ctrl, task = std::forward<F>(func)](Args&&... args)
{
// Initialize TLS variable
g_tls_this_thread = ctrl.get();
try
{
initialize();
task();
finalize();
ctrl->initialize();
task(std::forward<Args>(args)...);
}
catch (...)
{
finalize();
// Set exception
ctrl->m_exception = std::current_exception();
ctrl->set_exception();
}
});
ctrl->finalize();
}, std::forward<Args>(args)...);
return ctrl;
}
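With the variadic overload, arguments after the task are forwarded into the new thread. A hedged sketch (the thread name, path and task body are made up; assumes Utilities/Thread.h):

#include "Utilities/Thread.h"
#include <string>

void spawn_example()
{
	// The trailing arguments are forwarded through spawn() into the task
	auto worker = thread_ctrl::spawn("Example Thread", [](std::string path, int attempts)
	{
		// ... perform the work using `path` and `attempts` ...
	}, std::string("/dev_hdd0/example.bin"), 3);

	worker->join(); // rethrows any exception recorded via set_exception()
}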
@ -185,21 +185,27 @@ class named_thread : public std::enable_shared_from_this<named_thread>
std::shared_ptr<thread_ctrl> m_thread;
public:
// Thread condition variable for external use (this thread waits on it, other threads may notify)
std::condition_variable cv;
named_thread();
// Thread mutex for external use (can be used with `cv`)
std::mutex mutex;
virtual ~named_thread();
// Lock mutex, notify condition variable
void safe_notify()
{
// Lock for reliable notification, condition is assumed to be changed externally
std::unique_lock<std::mutex> lock(mutex);
// Deleted copy/move constructors + copy/move operators
named_thread(const named_thread&) = delete;
cv.notify_one();
}
// Get thread name
virtual std::string get_name() const;
protected:
// Start thread (cannot be called from the constructor: should throw bad_weak_ptr in such case)
void start();
// Thread task (called in the thread)
virtual void on_task() = 0;
// Thread finalization (called after on_task)
virtual void on_exit() {}
public:
// ID initialization
virtual void on_init()
{
@ -209,43 +215,25 @@ public:
// ID finalization
virtual void on_stop()
{
join();
m_thread->join();
}
protected:
// Thread task (called in the thread)
virtual void on_task() = 0;
// Thread finalization (called after on_task)
virtual void on_exit() {}
public:
named_thread() = default;
virtual ~named_thread() = default;
// Deleted copy/move constructors + copy/move operators
named_thread(const named_thread&) = delete;
// Get thread name
virtual std::string get_name() const;
// Start thread (cannot be called from the constructor: should throw bad_weak_ptr in such case)
void start();
// Join thread (get thread result)
void join();
// Get thread_ctrl
const thread_ctrl* get_thread_ctrl() const
const thread_ctrl* operator->() const
{
return m_thread.get();
}
// Compare with the current thread
bool is_current() const
// Lock mutex, notify condition variable
void lock_notify() const
{
return m_thread && thread_ctrl::get_current() == m_thread.get();
m_thread->lock_notify();
}
// Notify condition variable
void notify() const
{
m_thread->notify();
}
};


@ -79,11 +79,8 @@ struct atomic_test_and_complement;
template<typename T>
class atomic_t;
namespace fmt
{
template<typename T, typename = void>
struct unveil;
}
template<typename T, typename = void>
struct unveil;
// TODO: replace with std::void_t when available
namespace void_details
@ -409,6 +406,38 @@ struct ignore
}
};
// Contains value of any POD type with fixed size and alignment. TT<> is the type converter applied.
// For example, `simple_t` may be used to remove endianness.
template<template<typename> class TT, std::size_t S, std::size_t A = S>
struct alignas(A) any_pod
{
enum class byte : char {} data[S];
any_pod() = default;
template<typename T, typename T2 = TT<T>, typename = std::enable_if_t<std::is_pod<T2>::value && sizeof(T2) == S && alignof(T2) <= A>>
any_pod(const T& value)
{
reinterpret_cast<T2&>(data) = value;
}
template<typename T, typename T2 = TT<T>, typename = std::enable_if_t<std::is_pod<T2>::value && sizeof(T2) == S && alignof(T2) <= A>>
T2& as()
{
return reinterpret_cast<T2&>(data);
}
template<typename T, typename T2 = TT<T>, typename = std::enable_if_t<std::is_pod<T2>::value && sizeof(T2) == S && alignof(T2) <= A>>
const T2& as() const
{
return reinterpret_cast<const T2&>(data);
}
};
using any16 = any_pod<simple_t, sizeof(u16)>;
using any32 = any_pod<simple_t, sizeof(u32)>;
using any64 = any_pod<simple_t, sizeof(u64)>;
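A small illustration of the aliases (the surrounding code is hypothetical; assumes Utilities/types.h and that simple_t<> leaves plain integers unchanged, as the comment above about removing endianness suggests). The point is that a container can hold mixed fixed-size POD values without being templated on the element's real type:

#include "Utilities/types.h"
#include <vector>

void any_pod_example()
{
	std::vector<any32> args;

	// Any 32-bit POD value is stored as raw, correctly aligned bytes
	args.emplace_back(u32{ 0x1234 });
	args.emplace_back(s32{ -1 });

	// The original type is recalled explicitly with as<T>()
	u32 first = args[0].as<u32>();
	s32 second = args[1].as<s32>();
	(void)first; (void)second;
}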
// Allows to define integer convertible to multiple enum types
template<typename T = void, typename... Ts>
struct multicast : multicast<Ts...>


@ -1,4 +1,5 @@
if (NOT APPLE)
if(APPLE OR WIN32 AND NOT MSVC)
else()
add_subdirectory( glslang )
set(BUILD_TESTS OFF CACHE BOOL "Build tests" FORCE)
set(BUILD_DEMOS OFF CACHE BOOL "Build demos" FORCE)
@ -6,4 +7,4 @@ set(BUILD_DEMOS OFF CACHE BOOL "Build demos" FORCE)
set(BUILD_LAYERS OFF CACHE BOOL "Build demos" FORCE)
set(BUILD_VKJSON OFF CACHE BOOL "Build demos" FORCE)
add_subdirectory( Vulkan-LoaderAndValidationLayers )
endif()
endif()


@ -23,6 +23,15 @@ elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
if(APPLE)
add_compile_options(-stdlib=libc++)
endif()
if(WIN32)
add_compile_options(-pthread)
add_compile_options(-D__GXX_ABI_VERSION=1009)
endif()
endif()
if(WIN32)
add_definitions(-DUNICODE)
add_definitions(-D_WIN32_WINNT=0x0601)
endif()
if(NOT MSVC)
@ -32,11 +41,8 @@ if(NOT MSVC)
endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 -fexceptions")
add_compile_options(-msse -msse2 -mcx16 -mssse3)
if(WIN32)
add_compile_options(-municode -static -mwindows)
endif()
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHa /Zc:throwingNew /D _CRT_SECURE_NO_DEPRECATE=1 /D _CRT_NON_CONFORMING_SWPRINTFS=1 /D _SCL_SECURE_NO_WARNINGS=1")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Zc:throwingNew /D _CRT_SECURE_NO_DEPRECATE=1 /D _CRT_NON_CONFORMING_SWPRINTFS=1 /D _SCL_SECURE_NO_WARNINGS=1")
endif()
if(APPLE)
@ -88,7 +94,7 @@ if(NOT MSVC)
endif()
find_package(OpenGL REQUIRED)
find_package(OpenAL REQUIRED)
find_package(LLVM CONFIG)
#find_package(LLVM CONFIG) # TODO
include("${wxWidgets_USE_FILE}")
@ -141,15 +147,9 @@ else()
else()
llvm_map_components_to_libnames(LLVM_LIBS mcjit vectorize x86codegen x86disassembler mcdisassembler)
endif()
if(MSVC)
set_source_files_properties(${RPCS3_SRC_DIR}/Emu/Cell/PPULLVMRecompiler.cpp PROPERTIES COMPILE_FLAGS /GR-)
else()
set_source_files_properties(${RPCS3_SRC_DIR}/Emu/Cell/PPULLVMRecompiler.cpp PROPERTIES COMPILE_FLAGS -fno-rtti)
endif()
endif()
link_directories(
"${RPCS3_SRC_DIR}/../asmjit/"
"${RPCS3_SRC_DIR}/../3rdparty/minidx12/"
)
@ -169,9 +169,10 @@ RPCS3_SRC
"${RPCS3_SRC_DIR}/../Utilities/*.cpp"
"${RPCS3_SRC_DIR}/../rsx_program_decompiler/rsx_decompiler/*.cpp"
"${RPCS3_SRC_DIR}/../rsx_program_decompiler/shader_code/*.cpp"
"${RPCS3_SRC_DIR}/../asmjit/src/asmjit/*.cpp"
)
if(APPLE)
if(APPLE OR WIN32 AND NOT MSVC)
set (EXCLUDE_DIR "/RSX/VK/")
foreach (TMP_PATH ${RPCS3_SRC})
string (FIND ${TMP_PATH} ${EXCLUDE_DIR} EXCLUDE_DIR_FOUND)
@ -184,36 +185,26 @@ endif()
add_executable(rpcs3 ${RPCS3_SRC})
if(NOT MSVC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -L${CMAKE_CURRENT_BINARY_DIR}/../asmjit/") #hack because the asmjit cmake file force fno exceptions upd: not sure if vs2015 build is affected
else()
if(MSVC)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /SUBSYSTEM:WINDOWS /NODEFAULTLIB:libc.lib /NODEFAULTLIB:libcmt.lib /NODEFAULTLIB:libcd.lib /NODEFAULTLIB:libcmtd.lib /NODEFAULTLIB:msvcrtd.lib")
endif()
if(WIN32) # I'm not sure we need all of these libs, but we link them in vs
target_link_libraries(rpcs3 odbc32.lib odbccp32.lib comctl32.lib ws2_32.lib shlwapi.lib winmm.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib Iphlpapi.lib)
if(LLVM_FOUND)
target_link_libraries(rpcs3 ${LLVM_LIBS})
endif()
if(WIN32)
target_link_libraries(rpcs3 ws2_32.lib Winmm.lib)
if(NOT MSVC)
target_link_libraries(rpcs3 ${OPENGL_LIBRARIES} ${GLEW_LIBRARY} opengl32.lib glu32.lib)
target_link_libraries(rpcs3 ${OPENGL_LIBRARIES} ${GLEW_LIBRARY} opengl32.lib glu32.lib -lstdc++.dll -lpthread.dll)
else()
target_link_libraries(rpcs3 dxgi.lib d2d1.lib dwrite.lib)
target_link_libraries(rpcs3 dxgi.lib d2d1.lib dwrite.lib VKstatic.1 glslang OSDependent OGLCompiler SPIRV)
endif()
target_link_libraries(rpcs3 asmjit.lib avformat.lib avcodec.lib avutil.lib swresample.lib swscale.lib png16_static ${wxWidgets_LIBRARIES} ${OPENAL_LIBRARY} ${ADDITIONAL_LIBS} VKstatic.1 glslang OSDependent OGLCompiler SPIRV)
target_link_libraries(rpcs3 avformat.lib avcodec.lib avutil.lib swresample.lib swscale.lib png16_static ${wxWidgets_LIBRARIES} ${OPENAL_LIBRARY} ${ADDITIONAL_LIBS})
else()
if(LLVM_FOUND)
target_link_libraries(rpcs3 asmjit.a ${wxWidgets_LIBRARIES} ${OPENAL_LIBRARY} ${GLEW_LIBRARY} ${OPENGL_LIBRARIES})
target_link_libraries(rpcs3 libavformat.a libavcodec.a libavutil.a libswresample.a libswscale.a png16_static ${ZLIB_LIBRARIES} ${LLVM_LIBS} ${ADDITIONAL_LIBS})
if (NOT APPLE)
target_link_libraries(rpcs3 vulkan glslang OSDependent OGLCompiler SPIRV)
endif()
else()
target_link_libraries(rpcs3 asmjit.a ${wxWidgets_LIBRARIES} ${OPENAL_LIBRARY} ${GLEW_LIBRARY} ${OPENGL_LIBRARIES})
target_link_libraries(rpcs3 libavformat.a libavcodec.a libavutil.a libswresample.a libswscale.a png16_static ${ZLIB_LIBRARIES} ${ADDITIONAL_LIBS})
if (NOT APPLE)
target_link_libraries(rpcs3 vulkan glslang OSDependent OGLCompiler SPIRV)
endif()
endif()
target_link_libraries(rpcs3 ${wxWidgets_LIBRARIES} ${OPENAL_LIBRARY} ${GLEW_LIBRARY} ${OPENGL_LIBRARIES})
target_link_libraries(rpcs3 libavformat.a libavcodec.a libavutil.a libswresample.a libswscale.a png16_static ${ZLIB_LIBRARIES} ${ADDITIONAL_LIBS})
if (NOT APPLE)
target_link_libraries(rpcs3 vulkan glslang OSDependent OGLCompiler SPIRV)
endif()
endif()
if(LLVM_FOUND)
target_link_libraries(rpcs3 ${LLVM_LIBS})
endif()
set_target_properties(rpcs3 PROPERTIES COTIRE_CXX_PREFIX_HEADER_INIT "${RPCS3_SRC_DIR}/stdafx.h")


@ -5,59 +5,7 @@
#include "key_vault.h"
#include "unpkg.h"
static bool CheckHeader(const fs::file& pkg_f, PKGHeader& header)
{
if (header.pkg_magic != 0x7F504B47)
{
LOG_ERROR(LOADER, "PKG: Not a package file!");
return false;
}
switch (const u16 type = header.pkg_type)
{
case PKG_RELEASE_TYPE_DEBUG: break;
case PKG_RELEASE_TYPE_RELEASE: break;
default:
{
LOG_ERROR(LOADER, "PKG: Unknown PKG type (0x%x)", type);
return false;
}
}
switch (const u16 platform = header.pkg_platform)
{
case PKG_PLATFORM_TYPE_PS3: break;
case PKG_PLATFORM_TYPE_PSP: break;
default:
{
LOG_ERROR(LOADER, "PKG: Unknown PKG platform (0x%x)", platform);
return false;
}
}
if (header.header_size != PKG_HEADER_SIZE && header.header_size != PKG_HEADER_SIZE2)
{
LOG_ERROR(LOADER, "PKG: Wrong header size (0x%x)", header.header_size);
return false;
}
if (header.pkg_size > pkg_f.size())
{
LOG_ERROR(LOADER, "PKG: File size mismatch (pkg_size=0x%llx)", header.pkg_size);
return false;
}
if (header.data_size + header.data_offset > header.pkg_size)
{
LOG_ERROR(LOADER, "PKG: Data size mismatch (data_size=0x%llx, data_offset=0x%llx, file_size=0x%llx)", header.data_size, header.data_offset, header.pkg_size);
return false;
}
return true;
}
// PKG Decryption
bool pkg_install(const fs::file& pkg_f, const std::string& dir, volatile f64& progress)
bool pkg_install(const fs::file& pkg_f, const std::string& dir, atomic_t<double>& sync)
{
const std::size_t BUF_SIZE = 8192 * 1024; // 8 MB
@ -69,12 +17,53 @@ bool pkg_install(const fs::file& pkg_f, const std::string& dir, volatile f64& pr
if (!pkg_f.read(header))
{
LOG_ERROR(LOADER, "PKG: Package file is too short!");
LOG_ERROR(LOADER, "PKG file is too short!");
return false;
}
if (!CheckHeader(pkg_f, header))
if (header.pkg_magic != "\x7FPKG"_u32)
{
LOG_ERROR(LOADER, "Not a PKG file!");
return false;
}
switch (const u16 type = header.pkg_type)
{
case PKG_RELEASE_TYPE_DEBUG: break;
case PKG_RELEASE_TYPE_RELEASE: break;
default:
{
LOG_ERROR(LOADER, "Unknown PKG type (0x%x)", type);
return false;
}
}
switch (const u16 platform = header.pkg_platform)
{
case PKG_PLATFORM_TYPE_PS3: break;
case PKG_PLATFORM_TYPE_PSP: break;
default:
{
LOG_ERROR(LOADER, "Unknown PKG platform (0x%x)", platform);
return false;
}
}
if (header.header_size != PKG_HEADER_SIZE && header.header_size != PKG_HEADER_SIZE2)
{
LOG_ERROR(LOADER, "Wrong PKG header size (0x%x)", header.header_size);
return false;
}
if (header.pkg_size > pkg_f.size())
{
LOG_ERROR(LOADER, "PKG file size mismatch (pkg_size=0x%llx)", header.pkg_size);
return false;
}
if (header.data_size + header.data_offset > header.pkg_size)
{
LOG_ERROR(LOADER, "PKG data size mismatch (data_size=0x%llx, data_offset=0x%llx, file_size=0x%llx)", header.data_size, header.data_offset, header.pkg_size);
return false;
}
@ -141,8 +130,6 @@ bool pkg_install(const fs::file& pkg_f, const std::string& dir, volatile f64& pr
return read;
};
LOG_SUCCESS(LOADER, "PKG: Installing in %s (%d entries)...", dir, header.file_count);
decrypt(0, header.file_count * sizeof(PKGEntry), header.pkg_platform == PKG_PLATFORM_TYPE_PSP);
std::vector<PKGEntry> entries(header.file_count);
@ -155,7 +142,7 @@ bool pkg_install(const fs::file& pkg_f, const std::string& dir, volatile f64& pr
if (entry.name_size > 256)
{
LOG_ERROR(LOADER, "PKG: Name size is too big (0x%x)", entry.name_size);
LOG_ERROR(LOADER, "PKG name size is too big (0x%x)", entry.name_size);
continue;
}
@ -183,31 +170,35 @@ bool pkg_install(const fs::file& pkg_f, const std::string& dir, volatile f64& pr
if (decrypt(entry.file_offset + pos, block_size, is_psp) != block_size)
{
LOG_ERROR(LOADER, "PKG: Failed to extract file %s", path);
LOG_ERROR(LOADER, "Failed to extract file %s", path);
break;
}
if (out.write(buf.get(), block_size) != block_size)
{
LOG_ERROR(LOADER, "PKG: Failed to write file %s", path);
LOG_ERROR(LOADER, "Failed to write file %s", path);
break;
}
progress += (block_size + 0.0) / header.data_size;
if (sync.fetch_add((block_size + 0.0) / header.data_size) < 0.)
{
LOG_ERROR(LOADER, "Package installation cancelled: %s", dir);
return false;
}
}
if (did_overwrite)
{
LOG_SUCCESS(LOADER, "PKG: %s file overwritten", name);
LOG_WARNING(LOADER, "Overwritten file %s", name);
}
else
{
LOG_SUCCESS(LOADER, "PKG: %s file created", name);
LOG_NOTICE(LOADER, "Created file %s", name);
}
}
else
{
LOG_ERROR(LOADER, "PKG: Could not create file %s", path);
LOG_ERROR(LOADER, "Failed to create file %s", path);
}
break;
@ -219,15 +210,15 @@ bool pkg_install(const fs::file& pkg_f, const std::string& dir, volatile f64& pr
if (fs::create_dir(path))
{
LOG_SUCCESS(LOADER, "PKG: %s directory created", name);
LOG_NOTICE(LOADER, "Created directory %s", name);
}
else if (fs::is_dir(path))
{
LOG_SUCCESS(LOADER, "PKG: %s directory already exists", name);
LOG_WARNING(LOADER, "Reused existing directory %s", name);
}
else
{
LOG_ERROR(LOADER, "PKG: Could not create directory %s", path);
LOG_ERROR(LOADER, "Failed to create directory %s", path);
}
break;
@ -235,10 +226,11 @@ bool pkg_install(const fs::file& pkg_f, const std::string& dir, volatile f64& pr
default:
{
LOG_ERROR(LOADER, "PKG: Unknown PKG entry type (0x%x) %s", entry.type, name);
LOG_ERROR(LOADER, "Unknown PKG entry type (0x%x) %s", entry.type, name);
}
}
}
LOG_SUCCESS(LOADER, "Package successfully installed to %s", dir);
return true;
}
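The cancellation contract is visible in the fetch_add() check above: the installer adds each block's share of the total to the sync accumulator, and aborts if it ever observes a negative value. A hedged sketch of the caller side (the surrounding GUI/worker code is not part of this diff; install_pkg_async() and the include paths are assumptions):

#include "Crypto/unpkg.h"   // assumed location of the pkg_install() declaration
#include "Utilities/File.h" // assumed location of fs::file
#include <string>
#include <thread>

void install_pkg_async(const fs::file& pkg, const std::string& dir)
{
	atomic_t<double> sync{ 0. }; // doubles as progress (0.0 .. 1.0) and cancel flag

	std::thread worker([&]
	{
		pkg_install(pkg, dir, sync);
	});

	// A UI loop would poll `sync` here to display progress.
	// To cancel, push the accumulator far below zero so the next
	// fetch_add() inside pkg_install() sees a negative value:
	//     sync = -10000.;

	worker.join();
}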


@ -32,7 +32,7 @@ enum : u32
// Structs
struct PKGHeader
{
be_t<u32> pkg_magic; // Magic (0x7f504b47)
nse_t<u32> pkg_magic; // Magic (0x7f504b47)
be_t<u16> pkg_type; // Release type (Retail:0x8000, Debug:0x0000)
be_t<u16> pkg_platform; // Platform type (PS3:0x0001, PSP:0x0002)
be_t<u32> header_size; // Header size (0xc0)
@ -57,4 +57,4 @@ struct PKGEntry
be_t<u32> pad; // Padding (zeros)
};
bool pkg_install(const class fs::file& pkg_f, const std::string& dir, volatile f64& progress);
bool pkg_install(const class fs::file& pkg_f, const std::string& dir, atomic_t<double>&);


@ -182,13 +182,13 @@ ARMv7Thread::~ARMv7Thread()
}
}
ARMv7Thread::ARMv7Thread(const std::string& name)
: cpu_thread(cpu_type::arm, name)
{
}
void ARMv7Thread::fast_call(u32 addr)
{
if (!is_current())
{
throw EXCEPTION("Called from the wrong thread");
}
auto old_PC = PC;
auto old_stack = SP;
auto old_LR = LR;


@ -18,14 +18,10 @@ public:
virtual std::string dump() const override;
virtual void cpu_init() override;
virtual void cpu_task() override;
ARMv7Thread(const std::string& name)
: cpu_thread(cpu_type::arm, name)
{
}
virtual ~ARMv7Thread() override;
ARMv7Thread(const std::string& name);
union
{
u32 GPR[15];
@ -209,7 +205,7 @@ template<typename T>
struct arm_gpr_cast_impl<T, std::enable_if_t<std::is_integral<T>::value || std::is_enum<T>::value>>
{
static_assert(sizeof(T) <= 4, "Too big integral type for arm_gpr_cast<>()");
static_assert(std::is_same<const T, const bool>::value == false, "bool type is deprecated in arm_gpr_cast<>(), use b8 instead");
static_assert(std::is_same<CV T, CV bool>::value == false, "bool type is deprecated in arm_gpr_cast<>(), use b8 instead");
static inline u32 to(const T& value)
{


@ -9,6 +9,8 @@ LOG_CHANNEL(sceLibKernel);
extern u64 get_system_time();
extern std::condition_variable& get_current_thread_cv();
s32 sceKernelAllocMemBlock(vm::cptr<char> name, s32 type, u32 vsize, vm::ptr<SceKernelAllocMemBlockOpt> pOpt)
{
throw EXCEPTION("");
@ -71,7 +73,7 @@ arm_error_code sceKernelStartThread(s32 threadId, u32 argSize, vm::cptr<void> pA
thread->GPR[1] = pos;
thread->state -= cpu_state::stop;
thread->safe_notify();
thread->lock_notify();
return SCE_OK;
}
@ -484,11 +486,11 @@ arm_error_code sceKernelWaitEventFlag(ARMv7Thread& cpu, s32 evfId, u32 bitPatter
break;
}
cpu.cv.wait_for(lock, std::chrono::microseconds(timeout - passed));
get_current_thread_cv().wait_for(lock, std::chrono::microseconds(timeout - passed));
}
else
{
cpu.cv.wait(lock);
get_current_thread_cv().wait(lock);
}
}
@ -563,7 +565,7 @@ arm_error_code sceKernelSetEventFlag(s32 evfId, u32 bitPattern)
cpu.GPR[1] = result;
thread->state += cpu_state::signal;
thread->cv.notify_one();
thread->notify();
return true;
}
@ -612,7 +614,7 @@ arm_error_code sceKernelCancelEventFlag(s32 evfId, u32 setPattern, vm::ptr<s32>
static_cast<ARMv7Thread&>(*thread).GPR[0] = SCE_KERNEL_ERROR_WAIT_CANCEL;
static_cast<ARMv7Thread&>(*thread).GPR[1] = setPattern;
thread->state += cpu_state::signal;
thread->cv.notify_one();
thread->notify();
}
*pNumWaitThreads = static_cast<u32>(evf->sq.size());

View file

@ -762,7 +762,7 @@ struct psv_event_flag_t
static_cast<ARMv7Thread&>(*thread).GPR[0] = SCE_KERNEL_ERROR_WAIT_DELETE;
static_cast<ARMv7Thread&>(*thread).GPR[1] = pattern;
thread->state += cpu_state::signal;
thread->cv.notify_one();
thread->notify();
}
}
};

View file

@ -1,5 +1,5 @@
#include "stdafx.h"
#ifdef _MSC_VER
#ifdef _WIN32
#include "Utilities/Log.h"
#include "Utilities/Config.h"
#include "Emu/System.h"
@ -143,4 +143,5 @@ void XAudio2Thread::AddData(const void* src, int size)
Emu.Pause();
}
}
#endif

View file

@ -1,13 +1,10 @@
#pragma once
#ifdef _MSC_VER
#ifdef _WIN32
#include "Emu/Audio/AudioThread.h"
#pragma push_macro("_WIN32_WINNT")
#undef _WIN32_WINNT
#define _WIN32_WINNT 0x0601 // This is to be sure that correct (2.7) header is included
#include "3rdparty/XAudio2_7/XAudio2.h" // XAudio2 2.8 available only on Win8+, used XAudio2 2.7 from dxsdk
#pragma pop_macro("_WIN32_WINNT")
#include "3rdparty/XAudio2_7/XAudio2.h"
class XAudio2Thread : public AudioThread
{
@ -25,4 +22,5 @@ public:
virtual void Stop() override;
virtual void AddData(const void* src, int size) override;
};
#endif
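For reference, the reason this small guard change matters for mingw64 (commit note 6): _MSC_VER identifies the MSVC compiler only, while _WIN32 is defined by every Windows toolchain, so the guard now admits MinGW-w64 builds as well.

#ifdef _MSC_VER   // true only when compiling with MSVC
// old guard: the XAudio2 backend vanished from MinGW-w64 builds
#endif

#ifdef _WIN32     // true for MSVC *and* MinGW-w64 on Windows
// new guard: the backend is built on every Windows toolchain
#endif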

View file

@ -12,11 +12,13 @@ thread_local cpu_thread* g_tls_current_cpu_thread = nullptr;
void cpu_thread::on_task()
{
state -= cpu_state::exit;
g_tls_current_cpu_thread = this;
Emu.SendDbgCommand(DID_CREATE_THREAD, this);
std::unique_lock<std::mutex> lock(mutex);
std::unique_lock<std::mutex> lock(get_current_thread_mutex());
// Check thread status
while (!(state & cpu_state::exit))
@ -52,13 +54,29 @@ void cpu_thread::on_task()
continue;
}
cv.wait(lock);
get_current_thread_cv().wait(lock);
}
}
void cpu_thread::on_stop()
{
state += cpu_state::exit;
lock_notify();
}
cpu_thread::~cpu_thread()
{
}
cpu_thread::cpu_thread(cpu_type type, const std::string& name)
: type(type)
, name(name)
{
}
bool cpu_thread::check_status()
{
std::unique_lock<std::mutex> lock(mutex, std::defer_lock);
std::unique_lock<std::mutex> lock(get_current_thread_mutex(), std::defer_lock);
while (true)
{
@ -85,7 +103,7 @@ bool cpu_thread::check_status()
continue;
}
cv.wait(lock);
get_current_thread_cv().wait(lock);
}
const auto state_ = state.load();
@ -104,6 +122,11 @@ bool cpu_thread::check_status()
return false;
}
[[noreturn]] void cpu_thread::xsleep()
{
throw std::runtime_error("cpu_thread: sleep()/awake() inconsistency");
}
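The cv/mutex members move out of the class into free accessors, which also lets the header stop including <mutex> and <condition_variable> (part of commit note 1's compile-time cleanup). The lazy-lock wait idiom used above, restated with plain standard-library objects (toy names; the real accessors return whatever the thread framework associates with the current thread):

#include <condition_variable>
#include <mutex>

std::mutex g_mutex;            // stands in for get_current_thread_mutex()
std::condition_variable g_cv;  // stands in for get_current_thread_cv()

bool must_wait() { return false; } // placeholder for the real state checks

void check_status_sketch()
{
    std::unique_lock<std::mutex> lock(g_mutex, std::defer_lock);

    while (must_wait())
    {
        if (!lock) lock.lock();    // acquire lazily, only when about to sleep
        g_cv.wait(lock);
    }
}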
std::vector<std::shared_ptr<cpu_thread>> get_all_cpu_threads()
{
std::vector<std::shared_ptr<cpu_thread>> result;

View file

@ -34,26 +34,14 @@ class cpu_thread : public named_thread
void on_task() override;
public:
virtual void on_init() override
{
named_thread::on_init();
}
virtual void on_stop() override
{
state += cpu_state::exit;
safe_notify();
}
virtual void on_stop() override;
virtual ~cpu_thread() override;
const std::string name;
const u32 id{};
const u32 id = -1;
const cpu_type type;
cpu_thread(cpu_type type, const std::string& name)
: type(type)
, name(name)
{
}
cpu_thread(cpu_type type, const std::string& name);
// Public thread state
atomic_t<mset<cpu_state>> state{ cpu_state::stop };
@ -79,15 +67,15 @@ public:
virtual bool handle_interrupt() { return false; }
private:
[[noreturn]] void xsleep()
{
throw std::runtime_error("cpu_thread: sleep()/awake() inconsistency");
}
[[noreturn]] void xsleep();
// Sleep/Awake counter
atomic_t<u32> m_sleep{};
};
extern std::mutex& get_current_thread_mutex();
extern std::condition_variable& get_current_thread_cv();
inline cpu_thread* get_current_cpu_thread() noexcept
{
extern thread_local cpu_thread* g_tls_current_cpu_thread;

View file

@ -468,7 +468,7 @@ void adecOpen(u32 adec_id) // TODO: call from the constructor
adec.adecCb->cpu_init();
adec.adecCb->state -= cpu_state::stop;
adec.adecCb->safe_notify();
adec.adecCb->lock_notify();
}
bool adecCheckType(s32 type)

View file

@ -280,8 +280,6 @@ void audio_config::on_task()
LV2_LOCK;
std::lock_guard<std::mutex> lock(mutex);
for (u64 key : keys)
{
if (auto&& queue = lv2_event_queue_t::find(key))
@ -699,7 +697,7 @@ s32 cellAudioSetNotifyEventQueue(u64 key)
return CELL_AUDIO_ERROR_NOT_INIT;
}
std::lock_guard<std::mutex> lock(g_audio->mutex);
LV2_LOCK;
for (auto k : g_audio->keys) // check for duplicates
{
@ -734,7 +732,7 @@ s32 cellAudioRemoveNotifyEventQueue(u64 key)
return CELL_AUDIO_ERROR_NOT_INIT;
}
std::lock_guard<std::mutex> lock(g_audio->mutex);
LV2_LOCK;
for (auto i = g_audio->keys.begin(); i != g_audio->keys.end(); i++)
{

View file

@ -9,8 +9,6 @@ s32 cellAudioOutGetSoundAvailability(u32 audioOut, u32 type, u32 fs, u32 option)
{
cellSysutil.warning("cellAudioOutGetSoundAvailability(audioOut=%d, type=%d, fs=0x%x, option=%d)", audioOut, type, fs, option);
option = 0;
s32 available = 8; // should be at least 2
switch (fs)
@ -49,8 +47,6 @@ s32 cellAudioOutGetSoundAvailability2(u32 audioOut, u32 type, u32 fs, u32 ch, u3
{
cellSysutil.warning("cellAudioOutGetSoundAvailability2(audioOut=%d, type=%d, fs=0x%x, ch=%d, option=%d)", audioOut, type, fs, ch, option);
option = 0;
s32 available = 8; // should be at least 2
switch (fs)

View file

@ -759,7 +759,7 @@ void dmuxOpen(u32 dmux_id) // TODO: call from the constructor
dmux.dmuxCb->cpu_init();
dmux.dmuxCb->state -= cpu_state::stop;
dmux.dmuxCb->safe_notify();
dmux.dmuxCb->lock_notify();
}
s32 cellDmuxQueryAttr(vm::cptr<CellDmuxType> type, vm::ptr<CellDmuxAttr> attr)

View file

@ -263,7 +263,7 @@ s32 cellFsReadWithOffset(u32 fd, u64 offset, vm::ptr<void> buf, u64 buffer_size,
const auto read = file->file.read(buf.get_ptr(), buffer_size);
ASSERT(file->file.seek(old_pos) == old_pos);
file->file.seek(old_pos);
if (nread)
{
@ -292,7 +292,7 @@ s32 cellFsWriteWithOffset(u32 fd, u64 offset, vm::cptr<void> buf, u64 data_size,
const auto written = file->file.write(buf.get_ptr(), data_size);
ASSERT(file->file.seek(old_pos) == old_pos);
file->file.seek(old_pos);
if (nwrite)
{
@ -502,7 +502,7 @@ s32 cellFsStReadStart(u32 fd, u64 offset, u64 size)
auto old = file->file.pos();
file->file.seek(offset + file->st_total_read);
auto res = file->file.read(vm::base(position), file->st_block_size);
ASSERT(file->file.seek(old) == old);
file->file.seek(old);
// notify
file->st_total_read += res;
@ -796,11 +796,11 @@ s32 sdata_unpack(const std::string& packed_file, const std::string& unpacked_fil
if (flags & 0x20)
{
ASSERT(packed_stream.seek(0x100) == 0x100);
packed_stream.seek(0x100);
}
else
{
ASSERT(packed_stream.seek(startOffset) == startOffset);
packed_stream.seek(startOffset);
}
for (u32 i = 0; i < blockCount; i++)
@ -887,7 +887,7 @@ void fsAio(vm::ptr<CellFsAio> aio, bool write, s32 xid, fs_aio_cb_t func)
? file->file.write(aio->buf.get_ptr(), aio->size)
: file->file.read(aio->buf.get_ptr(), aio->size);
ASSERT(file->file.seek(old_pos) == old_pos);
file->file.seek(old_pos);
}
// should be executed directly by FS AIO thread

View file

@ -7,7 +7,7 @@
#include "cellMsgDialog.h"
#include "cellGame.h"
#include <future>
#include "Loader/PSF.h"
LOG_CHANNEL(cellGame);
@ -20,21 +20,25 @@ LOG_CHANNEL(cellGame);
// Usual (persistent) content directory (if is_temporary):
// contentInfo = "/dev_hdd0/game/" + dir
// usrdir = "/dev_hdd0/game/" + dir + "/USRDIR"
struct content_permission_t final
struct content_permission final
{
// content directory name or path
// Content directory name or path
const std::string dir;
// true if temporary directory is created and must be moved or deleted
// SFO file
psf::registry sfo;
// True if temporary directory is created and must be moved or deleted
bool is_temporary = false;
content_permission_t(const std::string& dir, bool is_temp)
: dir(dir)
content_permission(std::string&& dir, psf::registry&& sfo, bool is_temp)
: dir(std::move(dir))
, sfo(std::move(sfo))
, is_temporary(is_temp)
{
}
~content_permission_t()
~content_permission()
{
if (is_temporary)
{
@ -145,7 +149,7 @@ s32 cellGameDataExitBroken()
}
s32 cellGameBootCheck(vm::ptr<u32> type, vm::ptr<u32> attributes, vm::ptr<CellGameContentSize> size, vm::ptr<char[CELL_GAME_DIRNAME_SIZE]> dirName)
ppu_error_code cellGameBootCheck(vm::ptr<u32> type, vm::ptr<u32> attributes, vm::ptr<CellGameContentSize> size, vm::ptr<char[CELL_GAME_DIRNAME_SIZE]> dirName)
{
cellGame.warning("cellGameBootCheck(type=*0x%x, attributes=*0x%x, size=*0x%x, dirName=*0x%x)", type, attributes, size, dirName);
@ -160,7 +164,9 @@ s32 cellGameBootCheck(vm::ptr<u32> type, vm::ptr<u32> attributes, vm::ptr<CellGa
}
// According to testing (in debug mode), cellGameBootCheck doesn't return an error code when PARAM.SFO doesn't exist.
const std::string& category = psf::get_string(Emu.GetPSF(), "CATEGORY");
psf::registry&& sfo = psf::load_object(fs::file(vfs::get("/app_home/../PARAM.SFO")));
const std::string& category = psf::get_string(sfo, "CATEGORY");
if (category == "DG")
{
@ -168,7 +174,7 @@ s32 cellGameBootCheck(vm::ptr<u32> type, vm::ptr<u32> attributes, vm::ptr<CellGa
*attributes = 0; // TODO
if (dirName) strcpy_trunc(*dirName, ""); // ???
if (!fxm::make<content_permission_t>("/dev_bdvd/PS3_GAME", false))
if (!fxm::make<content_permission>("/dev_bdvd/PS3_GAME", std::move(sfo), false))
{
return CELL_GAME_ERROR_BUSY;
}
@ -179,7 +185,7 @@ s32 cellGameBootCheck(vm::ptr<u32> type, vm::ptr<u32> attributes, vm::ptr<CellGa
*attributes = 0; // TODO
if (dirName) strcpy_trunc(*dirName, Emu.GetTitleID());
if (!fxm::make<content_permission_t>("/dev_hdd0/game/" + Emu.GetTitleID(), false))
if (!fxm::make<content_permission>("/dev_hdd0/game/" + Emu.GetTitleID(), std::move(sfo), false))
{
return CELL_GAME_ERROR_BUSY;
}
@ -190,20 +196,20 @@ s32 cellGameBootCheck(vm::ptr<u32> type, vm::ptr<u32> attributes, vm::ptr<CellGa
*attributes = CELL_GAME_ATTRIBUTE_PATCH; // TODO
if (dirName) strcpy_trunc(*dirName, Emu.GetTitleID()); // ???
if (!fxm::make<content_permission_t>("/dev_bdvd/PS3_GAME", false))
if (!fxm::make<content_permission>("/dev_bdvd/PS3_GAME", std::move(sfo), false))
{
return CELL_GAME_ERROR_BUSY;
}
}
else
{
cellGame.error("cellGameBootCheck(): Unknown CATEGORY value: %s", category);
throw fmt::exception("cellGameBootCheck(): Unknown CATEGORY: %s", category);
}
return CELL_GAME_RET_OK;
}
s32 cellGamePatchCheck(vm::ptr<CellGameContentSize> size, vm::ptr<void> reserved)
ppu_error_code cellGamePatchCheck(vm::ptr<CellGameContentSize> size, vm::ptr<void> reserved)
{
cellGame.warning("cellGamePatchCheck(size=*0x%x, reserved=*0x%x)", size, reserved);
@ -217,13 +223,14 @@ s32 cellGamePatchCheck(vm::ptr<CellGameContentSize> size, vm::ptr<void> reserved
size->sysSizeKB = 0;
}
if (psf::get_string(Emu.GetPSF(), "CATEGORY") != "GD")
psf::registry&& sfo = psf::load_object(fs::file(vfs::get("/app_home/../PARAM.SFO")));
if (psf::get_string(sfo, "CATEGORY") != "GD")
{
cellGame.error("cellGamePatchCheck(): CELL_GAME_ERROR_NOTPATCH");
return CELL_GAME_ERROR_NOTPATCH;
}
if (!fxm::make<content_permission_t>("/dev_hdd0/game/" + Emu.GetTitleID(), false))
if (!fxm::make<content_permission>("/dev_hdd0/game/" + Emu.GetTitleID(), std::move(sfo), false))
{
return CELL_GAME_ERROR_BUSY;
}
@ -231,13 +238,12 @@ s32 cellGamePatchCheck(vm::ptr<CellGameContentSize> size, vm::ptr<void> reserved
return CELL_OK;
}
s32 cellGameDataCheck(u32 type, vm::cptr<char> dirName, vm::ptr<CellGameContentSize> size)
ppu_error_code cellGameDataCheck(u32 type, vm::cptr<char> dirName, vm::ptr<CellGameContentSize> size)
{
cellGame.warning("cellGameDataCheck(type=%d, dirName=*0x%x, size=*0x%x)", type, dirName, size);
if ((type - 1) >= 3)
{
cellGame.error("cellGameDataCheck(): CELL_GAME_ERROR_PARAM");
return CELL_GAME_ERROR_PARAM;
}
@ -252,7 +258,7 @@ s32 cellGameDataCheck(u32 type, vm::cptr<char> dirName, vm::ptr<CellGameContentS
}
// TODO: not sure what should be checked there
const std::string& dir = type == CELL_GAME_GAMETYPE_DISC ? "/dev_bdvd/PS3_GAME"s : "/dev_hdd0/game/"s + dirName.get_ptr();
std::string&& dir = type == CELL_GAME_GAMETYPE_DISC ? "/dev_bdvd/PS3_GAME"s : "/dev_hdd0/game/"s + dirName.get_ptr();
if (!fs::is_dir(vfs::get(dir)))
{
@ -260,7 +266,7 @@ s32 cellGameDataCheck(u32 type, vm::cptr<char> dirName, vm::ptr<CellGameContentS
return CELL_GAME_RET_NONE;
}
if (!fxm::make<content_permission_t>(dir, false))
if (!fxm::make<content_permission>(std::move(dir), psf::load_object(fs::file(vfs::get(dir + "/PARAM.SFO"))), false))
{
return CELL_GAME_ERROR_BUSY;
}
@ -268,7 +274,7 @@ s32 cellGameDataCheck(u32 type, vm::cptr<char> dirName, vm::ptr<CellGameContentS
return CELL_GAME_RET_OK;
}
s32 cellGameContentPermit(vm::ptr<char[CELL_GAME_PATH_MAX]> contentInfoPath, vm::ptr<char[CELL_GAME_PATH_MAX]> usrdirPath)
ppu_error_code cellGameContentPermit(vm::ptr<char[CELL_GAME_PATH_MAX]> contentInfoPath, vm::ptr<char[CELL_GAME_PATH_MAX]> usrdirPath)
{
cellGame.warning("cellGameContentPermit(contentInfoPath=*0x%x, usrdirPath=*0x%x)", contentInfoPath, usrdirPath);
@ -277,21 +283,21 @@ s32 cellGameContentPermit(vm::ptr<char[CELL_GAME_PATH_MAX]> contentInfoPath, vm:
return CELL_GAME_ERROR_PARAM;
}
const auto path_set = fxm::withdraw<content_permission_t>();
const auto prm = fxm::withdraw<content_permission>();
if (!path_set)
if (!prm)
{
return CELL_GAME_ERROR_FAILURE;
}
if (path_set->is_temporary)
if (prm->is_temporary)
{
const std::string& dir = "/dev_hdd0/game/" + path_set->dir;
const std::string& dir = "/dev_hdd0/game/" + prm->dir;
// Make temporary directory persistent
fs::remove_all(vfs::get(dir));
if (fs::rename(vfs::get("/dev_hdd1/game/" + path_set->dir), vfs::get(dir)))
if (fs::rename(vfs::get("/dev_hdd1/game/" + prm->dir), vfs::get(dir)))
{
cellGame.success("cellGameContentPermit(): created directory %s", dir);
}
@ -300,28 +306,30 @@ s32 cellGameContentPermit(vm::ptr<char[CELL_GAME_PATH_MAX]> contentInfoPath, vm:
throw fmt::exception("cellGameContentPermit(): failed to rename to %s", dir);
}
// Create PARAM.SFO
fs::file(dir + "/PARAM.SFO", fs::rewrite).write(psf::save_object(prm->sfo));
// Disable deletion
path_set->is_temporary = false;
prm->is_temporary = false;
strcpy_trunc(*contentInfoPath, dir);
strcpy_trunc(*usrdirPath, dir + "/USRDIR");
}
else
{
strcpy_trunc(*contentInfoPath, path_set->dir);
strcpy_trunc(*usrdirPath, path_set->dir + "/USRDIR");
strcpy_trunc(*contentInfoPath, prm->dir);
strcpy_trunc(*usrdirPath, prm->dir + "/USRDIR");
}
return CELL_OK;
}
s32 cellGameDataCheckCreate2(PPUThread& ppu, u32 version, vm::cptr<char> dirName, u32 errDialog, vm::ptr<CellGameDataStatCallback> funcStat, u32 container)
ppu_error_code cellGameDataCheckCreate2(PPUThread& ppu, u32 version, vm::cptr<char> dirName, u32 errDialog, vm::ptr<CellGameDataStatCallback> funcStat, u32 container)
{
cellGame.error("cellGameDataCheckCreate2(version=0x%x, dirName=*0x%x, errDialog=0x%x, funcStat=*0x%x, container=%d)", version, dirName, errDialog, funcStat, container);
if (version != CELL_GAMEDATA_VERSION_CURRENT || errDialog > 1)
{
cellGame.error("cellGameDataCheckCreate2(): CELL_GAMEDATA_ERROR_PARAM");
return CELL_GAMEDATA_ERROR_PARAM;
}
@ -356,11 +364,13 @@ s32 cellGameDataCheckCreate2(PPUThread& ppu, u32 version, vm::cptr<char> dirName
cbGet->sizeKB = CELL_GAMEDATA_SIZEKB_NOTCALC;
cbGet->sysSizeKB = 0;
psf::registry&& sfo = psf::load_object(fs::file(vfs::get("/app_home/../PARAM.SFO")));
cbGet->getParam.attribute = CELL_GAMEDATA_ATTR_NORMAL;
cbGet->getParam.parentalLevel = Emu.GetPSF().at("PARENTAL_LEVEL").as_integer();
strcpy_trunc(cbGet->getParam.dataVersion, Emu.GetPSF().at("APP_VER").as_string());
strcpy_trunc(cbGet->getParam.titleId, Emu.GetPSF().at("TITLE_ID").as_string());
strcpy_trunc(cbGet->getParam.title, Emu.GetPSF().at("TITLE").as_string());
cbGet->getParam.parentalLevel = psf::get_integer(sfo, "PARENTAL_LEVEL", 0);
strcpy_trunc(cbGet->getParam.dataVersion, psf::get_string(sfo, "APP_VER", ""));
strcpy_trunc(cbGet->getParam.titleId, psf::get_string(sfo, "TITLE_ID", ""));
strcpy_trunc(cbGet->getParam.title, psf::get_string(sfo, "TITLE", ""));
// TODO: write lang titles
funcStat(ppu, cbResult, cbGet, cbSet);
@ -410,11 +420,12 @@ s32 cellGameDataCheckCreate(PPUThread& ppu, u32 version, vm::cptr<char> dirName,
return cellGameDataCheckCreate2(ppu, version, dirName, errDialog, funcStat, container);
}
s32 cellGameCreateGameData(vm::ptr<CellGameSetInitParams> init, vm::ptr<char[CELL_GAME_PATH_MAX]> tmp_contentInfoPath, vm::ptr<char[CELL_GAME_PATH_MAX]> tmp_usrdirPath)
ppu_error_code cellGameCreateGameData(vm::ptr<CellGameSetInitParams> init, vm::ptr<char[CELL_GAME_PATH_MAX]> tmp_contentInfoPath, vm::ptr<char[CELL_GAME_PATH_MAX]> tmp_usrdirPath)
{
cellGame.error("cellGameCreateGameData(init=*0x%x, tmp_contentInfoPath=*0x%x, tmp_usrdirPath=*0x%x)", init, tmp_contentInfoPath, tmp_usrdirPath);
std::string dir = init->titleId;
std::string&& dir = init->titleId;
std::string tmp_contentInfo = "/dev_hdd1/game/" + dir;
std::string tmp_usrdir = "/dev_hdd1/game/" + dir + "/USRDIR";
@ -430,7 +441,14 @@ s32 cellGameCreateGameData(vm::ptr<CellGameSetInitParams> init, vm::ptr<char[CEL
return CELL_GAME_ERROR_ACCESS_ERROR; // ???
}
if (!fxm::make<content_permission_t>(dir, true))
psf::registry sfo
{
{ "TITLE_ID", psf::string(CELL_GAME_SYSP_TITLEID_SIZE, init->titleId) },
{ "TITLE", psf::string(CELL_GAME_SYSP_TITLE_SIZE, init->title) },
{ "VERSION", psf::string(CELL_GAME_SYSP_VERSION_SIZE, init->version) },
};
if (!fxm::make<content_permission>(std::move(dir), std::move(sfo), true))
{
return CELL_GAME_ERROR_BUSY;
}
@ -452,10 +470,17 @@ s32 cellGameDeleteGameData()
return CELL_OK;
}
s32 cellGameGetParamInt(u32 id, vm::ptr<u32> value)
ppu_error_code cellGameGetParamInt(s32 id, vm::ptr<s32> value)
{
cellGame.warning("cellGameGetParamInt(id=%d, value=*0x%x)", id, value);
const auto prm = fxm::get<content_permission>();
if (!prm)
{
return CELL_GAME_ERROR_FAILURE;
}
std::string key;
switch(id)
@ -464,63 +489,106 @@ s32 cellGameGetParamInt(u32 id, vm::ptr<u32> value)
case CELL_GAME_PARAMID_RESOLUTION: key = "RESOLUTION"; break;
case CELL_GAME_PARAMID_SOUND_FORMAT: key = "SOUND_FORMAT"; break;
default:
cellGame.error("cellGameGetParamInt(): Unimplemented parameter (%d)", id);
{
return CELL_GAME_ERROR_INVALID_ID;
}
}
*value = Emu.GetPSF().at(key).as_integer();
*value = psf::get_integer(prm->sfo, key, 0);
return CELL_OK;
}
s32 cellGameGetParamString(u32 id, vm::ptr<char> buf, u32 bufsize)
static const char* get_param_string_key(s32 id)
{
switch (id)
{
case CELL_GAME_PARAMID_TITLE: return "TITLE"; // TODO: Is this value correct?
case CELL_GAME_PARAMID_TITLE_DEFAULT: return "TITLE";
case CELL_GAME_PARAMID_TITLE_JAPANESE: return "TITLE_00";
case CELL_GAME_PARAMID_TITLE_ENGLISH: return "TITLE_01";
case CELL_GAME_PARAMID_TITLE_FRENCH: return "TITLE_02";
case CELL_GAME_PARAMID_TITLE_SPANISH: return "TITLE_03";
case CELL_GAME_PARAMID_TITLE_GERMAN: return "TITLE_04";
case CELL_GAME_PARAMID_TITLE_ITALIAN: return "TITLE_05";
case CELL_GAME_PARAMID_TITLE_DUTCH: return "TITLE_06";
case CELL_GAME_PARAMID_TITLE_PORTUGUESE: return "TITLE_07";
case CELL_GAME_PARAMID_TITLE_RUSSIAN: return "TITLE_08";
case CELL_GAME_PARAMID_TITLE_KOREAN: return "TITLE_09";
case CELL_GAME_PARAMID_TITLE_CHINESE_T: return "TITLE_10";
case CELL_GAME_PARAMID_TITLE_CHINESE_S: return "TITLE_11";
case CELL_GAME_PARAMID_TITLE_FINNISH: return "TITLE_12";
case CELL_GAME_PARAMID_TITLE_SWEDISH: return "TITLE_13";
case CELL_GAME_PARAMID_TITLE_DANISH: return "TITLE_14";
case CELL_GAME_PARAMID_TITLE_NORWEGIAN: return "TITLE_15";
case CELL_GAME_PARAMID_TITLE_POLISH: return "TITLE_16";
case CELL_GAME_PARAMID_TITLE_PORTUGUESE_BRAZIL: return "TITLE_17";
case CELL_GAME_PARAMID_TITLE_ENGLISH_UK: return "TITLE_18";
case CELL_GAME_PARAMID_TITLE_ID: return "TITLE_ID";
case CELL_GAME_PARAMID_VERSION: return "VERSION";
case CELL_GAME_PARAMID_PS3_SYSTEM_VER: return "PS3_SYSTEM_VER";
case CELL_GAME_PARAMID_APP_VER: return "APP_VER";
}
return nullptr;
}
ppu_error_code cellGameGetParamString(s32 id, vm::ptr<char> buf, u32 bufsize)
{
cellGame.warning("cellGameGetParamString(id=%d, buf=*0x%x, bufsize=%d)", id, buf, bufsize);
std::string key;
const auto prm = fxm::get<content_permission>();
switch(id)
if (!prm)
{
case CELL_GAME_PARAMID_TITLE: key = "TITLE"; break; // TODO: Is this value correct?
case CELL_GAME_PARAMID_TITLE_DEFAULT: key = "TITLE"; break;
case CELL_GAME_PARAMID_TITLE_JAPANESE: key = "TITLE_00"; break;
case CELL_GAME_PARAMID_TITLE_ENGLISH: key = "TITLE_01"; break;
case CELL_GAME_PARAMID_TITLE_FRENCH: key = "TITLE_02"; break;
case CELL_GAME_PARAMID_TITLE_SPANISH: key = "TITLE_03"; break;
case CELL_GAME_PARAMID_TITLE_GERMAN: key = "TITLE_04"; break;
case CELL_GAME_PARAMID_TITLE_ITALIAN: key = "TITLE_05"; break;
case CELL_GAME_PARAMID_TITLE_DUTCH: key = "TITLE_06"; break;
case CELL_GAME_PARAMID_TITLE_PORTUGUESE: key = "TITLE_07"; break;
case CELL_GAME_PARAMID_TITLE_RUSSIAN: key = "TITLE_08"; break;
case CELL_GAME_PARAMID_TITLE_KOREAN: key = "TITLE_09"; break;
case CELL_GAME_PARAMID_TITLE_CHINESE_T: key = "TITLE_10"; break;
case CELL_GAME_PARAMID_TITLE_CHINESE_S: key = "TITLE_11"; break;
case CELL_GAME_PARAMID_TITLE_FINNISH: key = "TITLE_12"; break;
case CELL_GAME_PARAMID_TITLE_SWEDISH: key = "TITLE_13"; break;
case CELL_GAME_PARAMID_TITLE_DANISH: key = "TITLE_14"; break;
case CELL_GAME_PARAMID_TITLE_NORWEGIAN: key = "TITLE_15"; break;
case CELL_GAME_PARAMID_TITLE_POLISH: key = "TITLE_16"; break;
case CELL_GAME_PARAMID_TITLE_PORTUGUESE_BRAZIL: key = "TITLE_17"; break;
case CELL_GAME_PARAMID_TITLE_ENGLISH_UK: key = "TITLE_18"; break;
return CELL_GAME_ERROR_FAILURE;
}
case CELL_GAME_PARAMID_TITLE_ID: key = "TITLE_ID"; break;
case CELL_GAME_PARAMID_VERSION: key = "PS3_SYSTEM_VER"; break;
case CELL_GAME_PARAMID_APP_VER: key = "APP_VER"; break;
const auto key = get_param_string_key(id);
default:
cellGame.error("cellGameGetParamString(): Unimplemented parameter (%d)", id);
if (!key)
{
return CELL_GAME_ERROR_INVALID_ID;
}
const std::string& value = Emu.GetPSF().at(key).as_string().substr(0, bufsize - 1);
std::string&& value = psf::get_string(prm->sfo, key);
value.resize(bufsize - 1);
std::copy_n(value.c_str(), value.size() + 1, buf.get_ptr());
return CELL_OK;
}
s32 cellGameSetParamString()
ppu_error_code cellGameSetParamString(s32 id, vm::cptr<char> buf)
{
UNIMPLEMENTED_FUNC(cellGame);
cellGame.warning("cellGameSetParamString(id=%d, buf=*0x%x)", id, buf);
const auto prm = fxm::get<content_permission>();
if (!prm)
{
return CELL_GAME_ERROR_FAILURE;
}
const auto key = get_param_string_key(id);
if (!key)
{
return CELL_GAME_ERROR_INVALID_ID;
}
u32 max_size = CELL_GAME_SYSP_TITLE_SIZE;
switch (id)
{
case CELL_GAME_PARAMID_TITLE_ID: max_size = CELL_GAME_SYSP_TITLEID_SIZE; break;
case CELL_GAME_PARAMID_VERSION: max_size = CELL_GAME_SYSP_VERSION_SIZE; break;
case CELL_GAME_PARAMID_PS3_SYSTEM_VER: max_size = CELL_GAME_SYSP_PS3_SYSTEM_VER_SIZE; break;
case CELL_GAME_PARAMID_APP_VER: max_size = CELL_GAME_SYSP_APP_VER_SIZE; break;
}
prm->sfo.emplace(key, psf::string(max_size, buf.get_ptr()));
return CELL_OK;
}
@ -542,7 +610,7 @@ s32 cellGameGetLocalWebContentPath()
return CELL_OK;
}
s32 cellGameContentErrorDialog(s32 type, s32 errNeedSizeKB, vm::cptr<char> dirName)
ppu_error_code cellGameContentErrorDialog(s32 type, s32 errNeedSizeKB, vm::cptr<char> dirName)
{
cellGame.warning("cellGameContentErrorDialog(type=%d, errNeedSizeKB=%d, dirName=*0x%x)", type, errNeedSizeKB, dirName);
@ -579,17 +647,20 @@ s32 cellGameContentErrorDialog(s32 type, s32 errNeedSizeKB, vm::cptr<char> dirNa
dlg->type.button_type = 2; // OK
dlg->type.disable_cancel = true;
const auto p = std::make_shared<std::promise<void>>();
std::future<void> future = p->get_future();
atomic_t<bool> result(false);
dlg->on_close = [=](s32 status)
dlg->on_close = [&](s32 status)
{
p->set_value();
result = true;
};
dlg->Create(errorMsg);
future.get();
while (!result)
{
CHECK_EMU_STATUS;
std::this_thread::sleep_for(1ms);
}
return CELL_OK;
}
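This is the pattern that replaces std::promise/std::future throughout the commit (note 3, the mingw64 compilation fix): the dialog callback flips an atomic flag and the calling thread polls it, checking the emulator status so the wait can be aborted. A standalone equivalent using only the standard library:

#include <atomic>
#include <chrono>
#include <thread>

int main()
{
    std::atomic<bool> result{false};

    // stands in for dlg->on_close / Emu.CallAfter: some other context
    // eventually flips the flag
    std::thread gui([&] { result = true; });

    while (!result)
    {
        // the real code also runs CHECK_EMU_STATUS here so the loop can bail out
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }

    gui.join();
}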

View file

@ -1,5 +1,7 @@
#pragma once
#include "Emu/Cell/ErrorCodes.h"
namespace vm { using namespace ps3; }
// Return Codes
@ -8,24 +10,54 @@ enum
CELL_GAME_RET_OK = 0,
CELL_GAME_RET_CANCEL = 1,
CELL_GAME_RET_NONE = 2,
CELL_GAME_ERROR_NOTFOUND = 0x8002cb04,
CELL_GAME_ERROR_BROKEN = 0x8002cb05,
CELL_GAME_ERROR_INTERNAL = 0x8002cb06,
CELL_GAME_ERROR_PARAM = 0x8002cb07,
CELL_GAME_ERROR_NOAPP = 0x8002cb08,
CELL_GAME_ERROR_ACCESS_ERROR = 0x8002cb09,
CELL_GAME_ERROR_NOSPACE = 0x8002cb20,
CELL_GAME_ERROR_NOTSUPPORTED = 0x8002cb21,
CELL_GAME_ERROR_FAILURE = 0x8002cb22,
CELL_GAME_ERROR_BUSY = 0x8002cb23,
CELL_GAME_ERROR_IN_SHUTDOWN = 0x8002cb24,
CELL_GAME_ERROR_INVALID_ID = 0x8002cb25,
CELL_GAME_ERROR_EXIST = 0x8002cb26,
CELL_GAME_ERROR_NOTPATCH = 0x8002cb27,
CELL_GAME_ERROR_INVALID_THEME_FILE = 0x8002cb28,
CELL_GAME_ERROR_BOOTPATH = 0x8002cb50,
};
enum CellGameError : s32
{
CELL_GAME_ERROR_NOTFOUND = ERROR_CODE(0x8002cb04),
CELL_GAME_ERROR_BROKEN = ERROR_CODE(0x8002cb05),
CELL_GAME_ERROR_INTERNAL = ERROR_CODE(0x8002cb06),
CELL_GAME_ERROR_PARAM = ERROR_CODE(0x8002cb07),
CELL_GAME_ERROR_NOAPP = ERROR_CODE(0x8002cb08),
CELL_GAME_ERROR_ACCESS_ERROR = ERROR_CODE(0x8002cb09),
CELL_GAME_ERROR_NOSPACE = ERROR_CODE(0x8002cb20),
CELL_GAME_ERROR_NOTSUPPORTED = ERROR_CODE(0x8002cb21),
CELL_GAME_ERROR_FAILURE = ERROR_CODE(0x8002cb22),
CELL_GAME_ERROR_BUSY = ERROR_CODE(0x8002cb23),
CELL_GAME_ERROR_IN_SHUTDOWN = ERROR_CODE(0x8002cb24),
CELL_GAME_ERROR_INVALID_ID = ERROR_CODE(0x8002cb25),
CELL_GAME_ERROR_EXIST = ERROR_CODE(0x8002cb26),
CELL_GAME_ERROR_NOTPATCH = ERROR_CODE(0x8002cb27),
CELL_GAME_ERROR_INVALID_THEME_FILE = ERROR_CODE(0x8002cb28),
CELL_GAME_ERROR_BOOTPATH = ERROR_CODE(0x8002cb50),
};
template<>
inline const char* ppu_error_code::print(CellGameError error)
{
switch (error)
{
STR_CASE(CELL_GAME_ERROR_NOTFOUND);
STR_CASE(CELL_GAME_ERROR_BROKEN);
STR_CASE(CELL_GAME_ERROR_INTERNAL);
STR_CASE(CELL_GAME_ERROR_PARAM);
STR_CASE(CELL_GAME_ERROR_NOAPP);
STR_CASE(CELL_GAME_ERROR_ACCESS_ERROR);
STR_CASE(CELL_GAME_ERROR_NOSPACE);
STR_CASE(CELL_GAME_ERROR_NOTSUPPORTED);
STR_CASE(CELL_GAME_ERROR_FAILURE);
STR_CASE(CELL_GAME_ERROR_BUSY);
STR_CASE(CELL_GAME_ERROR_IN_SHUTDOWN);
STR_CASE(CELL_GAME_ERROR_INVALID_ID);
STR_CASE(CELL_GAME_ERROR_EXIST);
STR_CASE(CELL_GAME_ERROR_NOTPATCH);
STR_CASE(CELL_GAME_ERROR_INVALID_THEME_FILE);
STR_CASE(CELL_GAME_ERROR_BOOTPATH);
}
return nullptr;
}
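A note on the helper used in the printer above: STR_CASE is assumed here (not verified against the project's headers) to expand to a case label that returns the enumerator's name, roughly:

#define STR_CASE(x) case x: return #x
// so STR_CASE(CELL_GAME_ERROR_BUSY) becomes: case CELL_GAME_ERROR_BUSY: return "CELL_GAME_ERROR_BUSY";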
// Definitions
enum
{
@ -37,6 +69,7 @@ enum
CELL_GAME_SYSP_TITLE_SIZE = 128,
CELL_GAME_SYSP_TITLEID_SIZE = 10,
CELL_GAME_SYSP_VERSION_SIZE = 6,
CELL_GAME_SYSP_PS3_SYSTEM_VER_SIZE = 8,
CELL_GAME_SYSP_APP_VER_SIZE = 6,
CELL_GAME_GAMETYPE_SYS = 0,
@ -90,6 +123,7 @@ enum
CELL_GAME_PARAMID_TITLE_ENGLISH_UK = 20,
CELL_GAME_PARAMID_TITLE_ID = 100,
CELL_GAME_PARAMID_VERSION = 101,
CELL_GAME_PARAMID_PS3_SYSTEM_VER = 105,
CELL_GAME_PARAMID_APP_VER = 106,
};

View file

@ -510,23 +510,13 @@ s32 cellGcmSetPrepareFlip(PPUThread& ppu, vm::ptr<CellGcmContextData> ctxt, u32
return res;
}
}
#if 1
*ctxt->current++ = (GCM_FLIP_COMMAND << 2) | (1 << 18);
*ctxt->current++ = id;
const u32 cmd_size = rsx::make_command(ctxt->current, GCM_FLIP_COMMAND, { id });
if (ctxt.addr() == gcm_info.context_addr)
{
vm::_ref<CellGcmControl>(gcm_info.control_addr).put += 2 * sizeof(u32);
vm::_ref<CellGcmControl>(gcm_info.control_addr).put += cmd_size;
}
#else
// internal compiler error, try to avoid it for now
u32 command_size = rsx::make_command(ctxt->current, GCM_FLIP_COMMAND, id);
if (ctxt.addr() == gcm_info.context_addr)
{
vm::_ref<CellGcmControl>(gcm_info.control_addr).put += command_size * sizeof(u32);
}
#endif
return id;
}
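The two raw stores that rsx::make_command() replaces spell out the RSX FIFO method header used here: the command value shifted left by 2 in the low bits and the argument count at bit 18, followed by the arguments. A small constexpr restatement of that encoding (illustrative only; whether the helper reports its size in words or bytes is implied solely by how `put` is advanced above):

#include <cstdint>

// Mirrors the removed inline encoding: (GCM_FLIP_COMMAND << 2) | (1 << 18).
constexpr std::uint32_t fifo_cmd(std::uint32_t reg, std::uint32_t arg_count)
{
    return (reg << 2) | (arg_count << 18);
}

static_assert(fifo_cmd(0x100, 1) == ((0x100u << 2) | (1u << 18)), "same layout");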

View file

@ -6,8 +6,6 @@
#include "cellSysutil.h"
#include "cellMsgDialog.h"
#include <future>
extern _log::channel cellSysutil;
s32 cellMsgDialogOpen()
@ -103,21 +101,20 @@ s32 cellMsgDialogOpen2(u32 type, vm::cptr<char> msgString, vm::ptr<CellMsgDialog
}
};
// Make "shared" promise to workaround std::function limitation
auto spr = std::make_shared<std::promise<void>>();
// Get future
std::future<void> future = spr->get_future();
atomic_t<bool> result(false);
// Run asynchronously in GUI thread
Emu.CallAfter([&, spr = std::move(spr)]()
Emu.CallAfter([&]()
{
dlg->Create(msgString.get_ptr());
spr->set_value();
result = true;
});
// Wait for the "result"
future.get();
while (!result)
{
CHECK_EMU_STATUS;
std::this_thread::sleep_for(1ms);
}
return CELL_OK;
}
@ -219,7 +216,7 @@ s32 cellMsgDialogClose(f32 delay)
const u64 wait_until = get_system_time() + static_cast<s64>(std::max<float>(delay, 0.0f) * 1000);
thread_ctrl::spawn("MsgDialog Thread", [=]()
thread_ctrl::spawn("cellMsgDialogClose() Thread", [=]()
{
while (dlg->state == MsgDialogState::Open && get_system_time() < wait_until)
{

View file

@ -4,6 +4,8 @@
#include "cellSaveData.h"
#include "Loader/PSF.h"
LOG_CHANNEL(cellSaveData);
// cellSaveData aliases (only for cellSaveData.cpp)

View file

@ -770,7 +770,7 @@ void spursSysServiceIdleHandler(SPUThread& spu, SpursKernelContext* ctxt)
{
bool shouldExit;
std::unique_lock<std::mutex> lock(spu.mutex, std::defer_lock);
std::unique_lock<std::mutex> lock(get_current_thread_mutex(), std::defer_lock);
while (true)
{
@ -861,7 +861,7 @@ void spursSysServiceIdleHandler(SPUThread& spu, SpursKernelContext* ctxt)
// The system service blocks by making a reservation and waiting on the lock line reservation lost event.
CHECK_EMU_STATUS;
if (!lock) lock.lock();
spu.cv.wait_for(lock, std::chrono::milliseconds(1));
get_current_thread_cv().wait_for(lock, std::chrono::milliseconds(1));
continue;
}

View file

@ -6,6 +6,8 @@
#include "Emu/Cell/lv2/sys_process.h"
#include "cellSync.h"
#include "Emu/Memory/wait_engine.h"
LOG_CHANNEL(cellSync);
namespace _sync
@ -45,7 +47,7 @@ ppu_error_code cellSyncMutexInitialize(vm::ptr<CellSyncMutex> mutex)
return CELL_OK;
}
ppu_error_code cellSyncMutexLock(PPUThread& ppu, vm::ptr<CellSyncMutex> mutex)
ppu_error_code cellSyncMutexLock(vm::ptr<CellSyncMutex> mutex)
{
cellSync.trace("cellSyncMutexLock(mutex=*0x%x)", mutex);
@ -63,7 +65,7 @@ ppu_error_code cellSyncMutexLock(PPUThread& ppu, vm::ptr<CellSyncMutex> mutex)
const auto order = mutex->ctrl.atomic_op(_sync::mutex_acquire);
// wait until rel value is equal to old acq value
vm::wait_op(ppu, mutex.addr(), 4, WRAP_EXPR(mutex->ctrl.load().rel == order));
vm::wait_op(mutex.addr(), 4, WRAP_EXPR(mutex->ctrl.load().rel == order));
_mm_mfence();
@ -108,7 +110,7 @@ ppu_error_code cellSyncMutexUnlock(vm::ptr<CellSyncMutex> mutex)
mutex->ctrl.atomic_op(_sync::mutex_unlock);
vm::notify_at(mutex);
vm::notify_at(mutex.addr(), 4);
return CELL_OK;
}
@ -138,7 +140,7 @@ ppu_error_code cellSyncBarrierInitialize(vm::ptr<CellSyncBarrier> barrier, u16 t
return CELL_OK;
}
ppu_error_code cellSyncBarrierNotify(PPUThread& ppu, vm::ptr<CellSyncBarrier> barrier)
ppu_error_code cellSyncBarrierNotify(vm::ptr<CellSyncBarrier> barrier)
{
cellSync.trace("cellSyncBarrierNotify(barrier=*0x%x)", barrier);
@ -152,9 +154,9 @@ ppu_error_code cellSyncBarrierNotify(PPUThread& ppu, vm::ptr<CellSyncBarrier> ba
return CELL_SYNC_ERROR_ALIGN;
}
vm::wait_op(ppu, barrier.addr(), 4, WRAP_EXPR(barrier->ctrl.atomic_op(_sync::barrier::try_notify)));
vm::wait_op(barrier.addr(), 4, WRAP_EXPR(barrier->ctrl.atomic_op(_sync::barrier::try_notify)));
vm::notify_at(barrier);
vm::notify_at(barrier.addr(), 4);
return CELL_OK;
}
@ -180,12 +182,12 @@ ppu_error_code cellSyncBarrierTryNotify(vm::ptr<CellSyncBarrier> barrier)
return NOT_AN_ERROR(CELL_SYNC_ERROR_BUSY);
}
vm::notify_at(barrier);
vm::notify_at(barrier.addr(), 4);
return CELL_OK;
}
ppu_error_code cellSyncBarrierWait(PPUThread& ppu, vm::ptr<CellSyncBarrier> barrier)
ppu_error_code cellSyncBarrierWait(vm::ptr<CellSyncBarrier> barrier)
{
cellSync.trace("cellSyncBarrierWait(barrier=*0x%x)", barrier);
@ -201,9 +203,9 @@ ppu_error_code cellSyncBarrierWait(PPUThread& ppu, vm::ptr<CellSyncBarrier> barr
_mm_mfence();
vm::wait_op(ppu, barrier.addr(), 4, WRAP_EXPR(barrier->ctrl.atomic_op(_sync::barrier::try_wait)));
vm::wait_op(barrier.addr(), 4, WRAP_EXPR(barrier->ctrl.atomic_op(_sync::barrier::try_wait)));
vm::notify_at(barrier);
vm::notify_at(barrier.addr(), 4);
return CELL_OK;
}
@ -229,7 +231,7 @@ ppu_error_code cellSyncBarrierTryWait(vm::ptr<CellSyncBarrier> barrier)
return NOT_AN_ERROR(CELL_SYNC_ERROR_BUSY);
}
vm::notify_at(barrier);
vm::notify_at(barrier.addr(), 4);
return CELL_OK;
}
@ -263,7 +265,7 @@ ppu_error_code cellSyncRwmInitialize(vm::ptr<CellSyncRwm> rwm, vm::ptr<void> buf
return CELL_OK;
}
ppu_error_code cellSyncRwmRead(PPUThread& ppu, vm::ptr<CellSyncRwm> rwm, vm::ptr<void> buffer)
ppu_error_code cellSyncRwmRead(vm::ptr<CellSyncRwm> rwm, vm::ptr<void> buffer)
{
cellSync.trace("cellSyncRwmRead(rwm=*0x%x, buffer=*0x%x)", rwm, buffer);
@ -278,7 +280,7 @@ ppu_error_code cellSyncRwmRead(PPUThread& ppu, vm::ptr<CellSyncRwm> rwm, vm::ptr
}
// wait until `writers` is zero, increase `readers`
vm::wait_op(ppu, rwm.addr(), 8, WRAP_EXPR(rwm->ctrl.atomic_op(_sync::rwlock::try_read_begin)));
vm::wait_op(rwm.addr(), 8, WRAP_EXPR(rwm->ctrl.atomic_op(_sync::rwlock::try_read_begin)));
// copy data to buffer
std::memcpy(buffer.get_ptr(), rwm->buffer.get_ptr(), rwm->size);
@ -289,7 +291,7 @@ ppu_error_code cellSyncRwmRead(PPUThread& ppu, vm::ptr<CellSyncRwm> rwm, vm::ptr
return CELL_SYNC_ERROR_ABORT;
}
vm::notify_at(rwm.ptr(&CellSyncRwm::ctrl));
vm::notify_at(rwm.addr(), 8);
return CELL_OK;
}
@ -323,12 +325,12 @@ ppu_error_code cellSyncRwmTryRead(vm::ptr<CellSyncRwm> rwm, vm::ptr<void> buffer
return CELL_SYNC_ERROR_ABORT;
}
vm::notify_at(rwm.ptr(&CellSyncRwm::ctrl));
vm::notify_at(rwm.addr(), 8);
return CELL_OK;
}
ppu_error_code cellSyncRwmWrite(PPUThread& ppu, vm::ptr<CellSyncRwm> rwm, vm::cptr<void> buffer)
ppu_error_code cellSyncRwmWrite(vm::ptr<CellSyncRwm> rwm, vm::cptr<void> buffer)
{
cellSync.trace("cellSyncRwmWrite(rwm=*0x%x, buffer=*0x%x)", rwm, buffer);
@ -343,10 +345,10 @@ ppu_error_code cellSyncRwmWrite(PPUThread& ppu, vm::ptr<CellSyncRwm> rwm, vm::cp
}
// wait until `writers` is zero, set to 1
vm::wait_op(ppu, rwm.addr(), 8, WRAP_EXPR(rwm->ctrl.atomic_op(_sync::rwlock::try_write_begin)));
vm::wait_op(rwm.addr(), 8, WRAP_EXPR(rwm->ctrl.atomic_op(_sync::rwlock::try_write_begin)));
// wait until `readers` is zero
vm::wait_op(ppu, rwm.addr(), 8, WRAP_EXPR(!rwm->ctrl.load().readers));
vm::wait_op(rwm.addr(), 8, WRAP_EXPR(!rwm->ctrl.load().readers));
// copy data from buffer
std::memcpy(rwm->buffer.get_ptr(), buffer.get_ptr(), rwm->size);
@ -354,7 +356,7 @@ ppu_error_code cellSyncRwmWrite(PPUThread& ppu, vm::ptr<CellSyncRwm> rwm, vm::cp
// sync and clear `readers` and `writers`
rwm->ctrl.exchange({ 0, 0 });
vm::notify_at(rwm.ptr(&CellSyncRwm::ctrl));
vm::notify_at(rwm.addr(), 8);
return CELL_OK;
}
@ -385,7 +387,7 @@ ppu_error_code cellSyncRwmTryWrite(vm::ptr<CellSyncRwm> rwm, vm::cptr<void> buff
// sync and clear `readers` and `writers`
rwm->ctrl.exchange({ 0, 0 });
vm::notify_at(rwm.ptr(&CellSyncRwm::ctrl));
vm::notify_at(rwm.addr(), 8);
return CELL_OK;
}
@ -425,7 +427,7 @@ ppu_error_code cellSyncQueueInitialize(vm::ptr<CellSyncQueue> queue, vm::ptr<u8>
return CELL_OK;
}
ppu_error_code cellSyncQueuePush(PPUThread& ppu, vm::ptr<CellSyncQueue> queue, vm::cptr<void> buffer)
ppu_error_code cellSyncQueuePush(vm::ptr<CellSyncQueue> queue, vm::cptr<void> buffer)
{
cellSync.trace("cellSyncQueuePush(queue=*0x%x, buffer=*0x%x)", queue, buffer);
@ -443,7 +445,7 @@ ppu_error_code cellSyncQueuePush(PPUThread& ppu, vm::ptr<CellSyncQueue> queue, v
u32 position;
vm::wait_op(ppu, queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(_sync::queue::try_push_begin, depth, &position)));
vm::wait_op(queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(_sync::queue::try_push_begin, depth, &position)));
// copy data from the buffer at the position
std::memcpy(&queue->buffer[position * queue->size], buffer.get_ptr(), queue->size);
@ -451,7 +453,7 @@ ppu_error_code cellSyncQueuePush(PPUThread& ppu, vm::ptr<CellSyncQueue> queue, v
// ...push_end
queue->ctrl.atomic_op([](_sync::queue& ctrl) { ctrl._push = 0; });
vm::notify_at(queue.ptr(&CellSyncQueue::ctrl));
vm::notify_at(queue.addr(), 8);
return CELL_OK;
}
@ -485,12 +487,12 @@ ppu_error_code cellSyncQueueTryPush(vm::ptr<CellSyncQueue> queue, vm::cptr<void>
// ...push_end
queue->ctrl.atomic_op([](_sync::queue& ctrl) { ctrl._push = 0; });
vm::notify_at(queue.ptr(&CellSyncQueue::ctrl));
vm::notify_at(queue.addr(), 8);
return CELL_OK;
}
ppu_error_code cellSyncQueuePop(PPUThread& ppu, vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffer)
ppu_error_code cellSyncQueuePop(vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffer)
{
cellSync.trace("cellSyncQueuePop(queue=*0x%x, buffer=*0x%x)", queue, buffer);
@ -508,7 +510,7 @@ ppu_error_code cellSyncQueuePop(PPUThread& ppu, vm::ptr<CellSyncQueue> queue, vm
u32 position;
vm::wait_op(ppu, queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(_sync::queue::try_pop_begin, depth, &position)));
vm::wait_op(queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(_sync::queue::try_pop_begin, depth, &position)));
// copy data at the position to the buffer
std::memcpy(buffer.get_ptr(), &queue->buffer[position % depth * queue->size], queue->size);
@ -516,7 +518,7 @@ ppu_error_code cellSyncQueuePop(PPUThread& ppu, vm::ptr<CellSyncQueue> queue, vm
// ...pop_end
queue->ctrl.atomic_op([](_sync::queue& ctrl) { ctrl._pop = 0; });
vm::notify_at(queue.ptr(&CellSyncQueue::ctrl));
vm::notify_at(queue.addr(), 8);
return CELL_OK;
}
@ -550,12 +552,12 @@ ppu_error_code cellSyncQueueTryPop(vm::ptr<CellSyncQueue> queue, vm::ptr<void> b
// ...pop_end
queue->ctrl.atomic_op([](_sync::queue& ctrl) { ctrl._pop = 0; });
vm::notify_at(queue.ptr(&CellSyncQueue::ctrl));
vm::notify_at(queue.addr(), 8);
return CELL_OK;
}
ppu_error_code cellSyncQueuePeek(PPUThread& ppu, vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffer)
ppu_error_code cellSyncQueuePeek(vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffer)
{
cellSync.trace("cellSyncQueuePeek(queue=*0x%x, buffer=*0x%x)", queue, buffer);
@ -573,7 +575,7 @@ ppu_error_code cellSyncQueuePeek(PPUThread& ppu, vm::ptr<CellSyncQueue> queue, v
u32 position;
vm::wait_op(ppu, queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(_sync::queue::try_peek_begin, depth, &position)));
vm::wait_op(queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(_sync::queue::try_peek_begin, depth, &position)));
// copy data at the position to the buffer
std::memcpy(buffer.get_ptr(), &queue->buffer[position % depth * queue->size], queue->size);
@ -581,7 +583,7 @@ ppu_error_code cellSyncQueuePeek(PPUThread& ppu, vm::ptr<CellSyncQueue> queue, v
// ...peek_end
queue->ctrl.atomic_op([](_sync::queue& ctrl) { ctrl._pop = 0; });
vm::notify_at(queue.ptr(&CellSyncQueue::ctrl));
vm::notify_at(queue.addr(), 8);
return CELL_OK;
}
@ -615,7 +617,7 @@ ppu_error_code cellSyncQueueTryPeek(vm::ptr<CellSyncQueue> queue, vm::ptr<void>
// ...peek_end
queue->ctrl.atomic_op([](_sync::queue& ctrl) { ctrl._pop = 0; });
vm::notify_at(queue.ptr(&CellSyncQueue::ctrl));
vm::notify_at(queue.addr(), 8);
return CELL_OK;
}
@ -639,7 +641,7 @@ ppu_error_code cellSyncQueueSize(vm::ptr<CellSyncQueue> queue)
return NOT_AN_ERROR(queue->ctrl.load().count & 0xffffff);
}
ppu_error_code cellSyncQueueClear(PPUThread& ppu, vm::ptr<CellSyncQueue> queue)
ppu_error_code cellSyncQueueClear(vm::ptr<CellSyncQueue> queue)
{
cellSync.trace("cellSyncQueueClear(queue=*0x%x)", queue);
@ -655,12 +657,12 @@ ppu_error_code cellSyncQueueClear(PPUThread& ppu, vm::ptr<CellSyncQueue> queue)
const u32 depth = queue->check_depth();
vm::wait_op(ppu, queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(_sync::queue::try_clear_begin_1)));
vm::wait_op(ppu, queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(_sync::queue::try_clear_begin_2)));
vm::wait_op(queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(_sync::queue::try_clear_begin_1)));
vm::wait_op(queue.addr(), 8, WRAP_EXPR(queue->ctrl.atomic_op(_sync::queue::try_clear_begin_2)));
queue->ctrl.exchange({ 0, 0 });
vm::notify_at(queue.ptr(&CellSyncQueue::ctrl));
vm::notify_at(queue.addr(), 8);
return CELL_OK;
}
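Every primitive in this file follows the same pairing: vm::wait_op() blocks until a predicate over a small guest-memory range succeeds, and vm::notify_at() wakes the waiters after that range is modified (the PPUThread& argument disappears, so the waiting thread is presumably taken from thread-local context; that much is an inference from this diff). Ignoring the per-address granularity, a toy standard-library equivalent of the contract:

#include <condition_variable>
#include <functional>
#include <mutex>

namespace toy_vm
{
    std::mutex g_mutex;
    std::condition_variable g_cv;

    // block the calling thread until pred() returns true
    void wait_op(const std::function<bool()>& pred)
    {
        std::unique_lock<std::mutex> lock(g_mutex);
        g_cv.wait(lock, pred);
    }

    // wake everyone after modifying the shared data
    void notify_at()
    {
        g_cv.notify_all();
    }
}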

View file

@ -80,9 +80,20 @@ static const char* get_systemparam_id_name(s32 id)
return tls_id_name;
}
enum class systemparam_id_name : s32 {};
template<>
struct unveil<systemparam_id_name, void>
{
static inline const char* get(systemparam_id_name arg)
{
return get_systemparam_id_name((s32)arg);
}
};
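The specialization above lets the logging formatter print the parameter's name: wrapping the raw id as systemparam_id_name(id) routes it through unveil<>::get() instead of printing a number. A toy version of the same trait-specialization dispatch (standalone names, not the project's unveil<>):

#include <cstdio>

enum class color_id : int {};

// toy formatter trait in the spirit of unveil<>
template<typename T> struct name_of;
template<> struct name_of<color_id>
{
    static const char* get(color_id v)
    {
        switch (static_cast<int>(v)) { case 0: return "RED"; case 1: return "GREEN"; }
        return "UNKNOWN";
    }
};

int main()
{
    std::printf("%s\n", name_of<color_id>::get(color_id(1))); // prints GREEN
}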
s32 cellSysutilGetSystemParamInt(s32 id, vm::ptr<s32> value)
{
cellSysutil.warning("cellSysutilGetSystemParamInt(id=%s, value=*0x%x)", get_systemparam_id_name(id), value);
cellSysutil.warning("cellSysutilGetSystemParamInt(id=%s, value=*0x%x)", systemparam_id_name(id), value);
// TODO: load this information from config (preferably "sys/" group)

View file

@ -541,7 +541,7 @@ void vdecOpen(u32 vdec_id) // TODO: call from the constructor
vdec.vdecCb->cpu_init();
vdec.vdecCb->state -= cpu_state::stop;
vdec.vdecCb->safe_notify();
vdec.vdecCb->lock_notify();
}
s32 cellVdecQueryAttr(vm::cptr<CellVdecType> type, vm::ptr<CellVdecAttr> attr)

View file

@ -447,7 +447,7 @@ s32 cellSurMixerCreate(vm::cptr<CellSurMixerConfig> config)
ppu->cpu_init();
ppu->state -= cpu_state::stop;
ppu->safe_notify();
ppu->lock_notify();
return CELL_OK;
}

View file

@ -4,8 +4,6 @@
#include "sys_net.h"
#ifdef _WIN32
#undef _WIN32_WINNT
#define _WIN32_WINNT 0x0601
#include <winsock2.h>
#include <WS2tcpip.h>
#else
@ -122,7 +120,7 @@ namespace sys_net
// TODO
thread_local vm::ptr<_tls_data_t> g_tls_net_data{};
static void initialize_tls()
static never_inline void initialize_tls()
{
// allocate if not initialized
if (!g_tls_net_data)

View file

@ -4,6 +4,8 @@
#include "sysPrxForUser.h"
#include "Emu/Memory/wait_engine.h"
extern _log::channel sysPrxForUser;
void sys_spinlock_initialize(vm::ptr<atomic_be_t<u32>> lock)
@ -13,12 +15,12 @@ void sys_spinlock_initialize(vm::ptr<atomic_be_t<u32>> lock)
lock->exchange(0);
}
void sys_spinlock_lock(PPUThread& ppu, vm::ptr<atomic_be_t<u32>> lock)
void sys_spinlock_lock(vm::ptr<atomic_be_t<u32>> lock)
{
sysPrxForUser.trace("sys_spinlock_lock(lock=*0x%x)", lock);
// prx: exchange with 0xabadcafe, repeat until exchanged with 0
vm::wait_op(ppu, lock.addr(), 4, WRAP_EXPR(!lock->exchange(0xabadcafe)));
vm::wait_op(lock.addr(), 4, WRAP_EXPR(!lock->exchange(0xabadcafe)));
}
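The prx comment above describes the whole algorithm. As a self-contained illustration, the same test-and-set idea with std::atomic (the host-side wait_op sleep is replaced by a plain spin, so this is a toy, not the emulated behaviour):

#include <atomic>

struct toy_spinlock
{
    std::atomic<unsigned> v{0};

    void lock()
    {
        // repeat "exchange with 0xabadcafe" until the previous value was 0
        while (v.exchange(0xabadcafe) != 0) { /* spin */ }
    }

    void unlock()
    {
        v.store(0);
    }
};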
s32 sys_spinlock_trylock(vm::ptr<atomic_be_t<u32>> lock)

View file

@ -24,7 +24,7 @@ void CallbackManager::Async(async_cb_t func)
m_async_cb.emplace(std::move(func));
m_cb_thread->cv.notify_one();
m_cb_thread->notify();
}
CallbackManager::check_cb_t CallbackManager::Check()
@ -74,7 +74,7 @@ void CallbackManager::Init()
continue;
}
ppu.cv.wait(lock);
get_current_thread_cv().wait(lock);
}
};

View file

@ -2374,6 +2374,6 @@ s32 ppu_error_code::report(s32 error, const char* text)
}
}
LOG_ERROR(PPU, "Illegal call to ppu_report_error(0x%x, '%s')!");
LOG_ERROR(PPU, "Illegal call to ppu_error_code::report(0x%x, '%s')!", error, text);
LOG_ERROR(PPU, "Illegal call to ppu_error_code::report(0x%x, '%s')!", error, text);
return error;
}

View file

@ -56,7 +56,6 @@ public:
}
const g_ppu_scale_table;
void ppu_interpreter::TDI(PPUThread& ppu, ppu_opcode_t op)
{
const s64 a = ppu.GPR[op.ra], b = op.simm16;
@ -242,79 +241,79 @@ void ppu_interpreter::VCMPBFP(PPUThread& ppu, ppu_opcode_t op)
const auto cmp1 = _mm_cmpnle_ps(a, b);
const auto cmp2 = _mm_cmpnge_ps(a, _mm_xor_ps(b, sign));
ppu.VR[op.vd].vf = _mm_or_ps(_mm_and_ps(cmp1, sign), _mm_and_ps(cmp2, _mm_castsi128_ps(_mm_set1_epi32(0x40000000))));
if (op.oe) ppu.SetCR(6, false, false, _mm_movemask_ps(_mm_or_ps(cmp1, cmp2)) == 0, false);
if (UNLIKELY(op.oe)) ppu.SetCR(6, false, false, _mm_movemask_ps(_mm_or_ps(cmp1, cmp2)) == 0, false);
}
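The UNLIKELY() hints sprinkled through these handlers are the usual branch-prediction annotation; a typical definition (an assumption here, not copied from the project's headers) is:

#if defined(__GNUC__) || defined(__clang__)
#define UNLIKELY(x) __builtin_expect(!!(x), 0)   // hint: condition is rarely true
#else
#define UNLIKELY(x) (x)                          // no-op where the builtin is unavailable
#endif

so the rc/oe condition-register updates stay off the hot path without changing behaviour.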
void ppu_interpreter::VCMPEQFP(PPUThread& ppu, ppu_opcode_t op)
{
const auto rmask = _mm_movemask_ps(ppu.VR[op.vd].vf = _mm_cmpeq_ps(ppu.VR[op.va].vf, ppu.VR[op.vb].vf));
if (op.oe) ppu.SetCR(6, rmask == 0xf, false, rmask == 0, false);
if (UNLIKELY(op.oe)) ppu.SetCR(6, rmask == 0xf, false, rmask == 0, false);
}
void ppu_interpreter::VCMPEQUB(PPUThread& ppu, ppu_opcode_t op)
{
const auto rmask = _mm_movemask_epi8((ppu.VR[op.vd] = v128::eq8(ppu.VR[op.va], ppu.VR[op.vb])).vi);
if (op.oe) ppu.SetCR(6, rmask == 0xffff, false, rmask == 0, false);
if (UNLIKELY(op.oe)) ppu.SetCR(6, rmask == 0xffff, false, rmask == 0, false);
}
void ppu_interpreter::VCMPEQUH(PPUThread& ppu, ppu_opcode_t op)
{
const auto rmask = _mm_movemask_epi8((ppu.VR[op.vd] = v128::eq16(ppu.VR[op.va], ppu.VR[op.vb])).vi);
if (op.oe) ppu.SetCR(6, rmask == 0xffff, false, rmask == 0, false);
if (UNLIKELY(op.oe)) ppu.SetCR(6, rmask == 0xffff, false, rmask == 0, false);
}
void ppu_interpreter::VCMPEQUW(PPUThread& ppu, ppu_opcode_t op)
{
const auto rmask = _mm_movemask_epi8((ppu.VR[op.vd] = v128::eq32(ppu.VR[op.va], ppu.VR[op.vb])).vi);
if (op.oe) ppu.SetCR(6, rmask == 0xffff, false, rmask == 0, false);
if (UNLIKELY(op.oe)) ppu.SetCR(6, rmask == 0xffff, false, rmask == 0, false);
}
void ppu_interpreter::VCMPGEFP(PPUThread& ppu, ppu_opcode_t op)
{
const auto rmask = _mm_movemask_ps(ppu.VR[op.vd].vf = _mm_cmpge_ps(ppu.VR[op.va].vf, ppu.VR[op.vb].vf));
if (op.oe) ppu.SetCR(6, rmask == 0xf, false, rmask == 0, false);
if (UNLIKELY(op.oe)) ppu.SetCR(6, rmask == 0xf, false, rmask == 0, false);
}
void ppu_interpreter::VCMPGTFP(PPUThread& ppu, ppu_opcode_t op)
{
const auto rmask = _mm_movemask_ps(ppu.VR[op.vd].vf = _mm_cmpgt_ps(ppu.VR[op.va].vf, ppu.VR[op.vb].vf));
if (op.oe) ppu.SetCR(6, rmask == 0xf, false, rmask == 0, false);
if (UNLIKELY(op.oe)) ppu.SetCR(6, rmask == 0xf, false, rmask == 0, false);
}
void ppu_interpreter::VCMPGTSB(PPUThread& ppu, ppu_opcode_t op)
{
const auto rmask = _mm_movemask_epi8(ppu.VR[op.vd].vi = _mm_cmpgt_epi8(ppu.VR[op.va].vi, ppu.VR[op.vb].vi));
if (op.oe) ppu.SetCR(6, rmask == 0xffff, false, rmask == 0, false);
if (UNLIKELY(op.oe)) ppu.SetCR(6, rmask == 0xffff, false, rmask == 0, false);
}
void ppu_interpreter::VCMPGTSH(PPUThread& ppu, ppu_opcode_t op)
{
const auto rmask = _mm_movemask_epi8(ppu.VR[op.vd].vi = _mm_cmpgt_epi16(ppu.VR[op.va].vi, ppu.VR[op.vb].vi));
if (op.oe) ppu.SetCR(6, rmask == 0xffff, false, rmask == 0, false);
if (UNLIKELY(op.oe)) ppu.SetCR(6, rmask == 0xffff, false, rmask == 0, false);
}
void ppu_interpreter::VCMPGTSW(PPUThread& ppu, ppu_opcode_t op)
{
const auto rmask = _mm_movemask_epi8(ppu.VR[op.vd].vi = _mm_cmpgt_epi32(ppu.VR[op.va].vi, ppu.VR[op.vb].vi));
if (op.oe) ppu.SetCR(6, rmask == 0xffff, false, rmask == 0, false);
if (UNLIKELY(op.oe)) ppu.SetCR(6, rmask == 0xffff, false, rmask == 0, false);
}
void ppu_interpreter::VCMPGTUB(PPUThread& ppu, ppu_opcode_t op)
{
const auto rmask = _mm_movemask_epi8(ppu.VR[op.vd].vi = sse_cmpgt_epu8(ppu.VR[op.va].vi, ppu.VR[op.vb].vi));
if (op.oe) ppu.SetCR(6, rmask == 0xffff, false, rmask == 0, false);
if (UNLIKELY(op.oe)) ppu.SetCR(6, rmask == 0xffff, false, rmask == 0, false);
}
void ppu_interpreter::VCMPGTUH(PPUThread& ppu, ppu_opcode_t op)
{
const auto rmask = _mm_movemask_epi8(ppu.VR[op.vd].vi = sse_cmpgt_epu16(ppu.VR[op.va].vi, ppu.VR[op.vb].vi));
if (op.oe) ppu.SetCR(6, rmask == 0xffff, false, rmask == 0, false);
if (UNLIKELY(op.oe)) ppu.SetCR(6, rmask == 0xffff, false, rmask == 0, false);
}
void ppu_interpreter::VCMPGTUW(PPUThread& ppu, ppu_opcode_t op)
{
const auto rmask = _mm_movemask_epi8(ppu.VR[op.vd].vi = sse_cmpgt_epu32(ppu.VR[op.va].vi, ppu.VR[op.vb].vi));
if (op.oe) ppu.SetCR(6, rmask == 0xffff, false, rmask == 0, false);
if (UNLIKELY(op.oe)) ppu.SetCR(6, rmask == 0xffff, false, rmask == 0, false);
}
void ppu_interpreter::VCTSXS(PPUThread& ppu, ppu_opcode_t op)
@ -1510,7 +1509,7 @@ void ppu_interpreter::ADDIC(PPUThread& ppu, ppu_opcode_t op)
const auto r = add64_flags(a, i);
ppu.GPR[op.rd] = r.result;
ppu.CA = r.carry;
if (op.main & 1) ppu.SetCR<s64>(0, r.result, 0);
if (UNLIKELY(op.main & 1)) ppu.SetCR<s64>(0, r.result, 0);
}
void ppu_interpreter::ADDI(PPUThread& ppu, ppu_opcode_t op)
@ -1649,19 +1648,19 @@ void ppu_interpreter::RLWIMI(PPUThread& ppu, ppu_opcode_t op)
{
const u64 mask = ppu_rotate_mask(32 + op.mb32, 32 + op.me32);
ppu.GPR[op.ra] = (ppu.GPR[op.ra] & ~mask) | (dup32(rol32(u32(ppu.GPR[op.rs]), op.sh32)) & mask);
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::RLWINM(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.ra] = dup32(rol32(u32(ppu.GPR[op.rs]), op.sh32)) & ppu_rotate_mask(32 + op.mb32, 32 + op.me32);
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::RLWNM(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.ra] = dup32(rol32(u32(ppu.GPR[op.rs]), ppu.GPR[op.rb] & 0x1f)) & ppu_rotate_mask(32 + op.mb32, 32 + op.me32);
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::ORI(PPUThread& ppu, ppu_opcode_t op)
@ -1698,39 +1697,39 @@ void ppu_interpreter::ANDIS(PPUThread& ppu, ppu_opcode_t op)
void ppu_interpreter::RLDICL(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.ra] = rol64(ppu.GPR[op.rs], op.sh64) & ppu_rotate_mask(op.mbe64, 63);
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
ppu.GPR[op.ra] = rol64(ppu.GPR[op.rs], op.sh64) & (~0ull >> op.mbe64);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::RLDICR(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.ra] = rol64(ppu.GPR[op.rs], op.sh64) & ppu_rotate_mask(0, op.mbe64);
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
ppu.GPR[op.ra] = rol64(ppu.GPR[op.rs], op.sh64) & (~0ull << (op.mbe64 ^ 63));
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::RLDIC(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.ra] = rol64(ppu.GPR[op.rs], op.sh64) & ppu_rotate_mask(op.mbe64, op.sh64 ^ 63);
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::RLDIMI(PPUThread& ppu, ppu_opcode_t op)
{
const u64 mask = ppu_rotate_mask(op.mbe64, op.sh64 ^ 63);
ppu.GPR[op.ra] = (ppu.GPR[op.ra] & ~mask) | (rol64(ppu.GPR[op.rs], op.sh64) & mask);
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::RLDCL(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.ra] = rol64(ppu.GPR[op.rs], ppu.GPR[op.rb] & 0x3f) & ppu_rotate_mask(op.mbe64, 63);
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
ppu.GPR[op.ra] = rol64(ppu.GPR[op.rs], ppu.GPR[op.rb] & 0x3f) & (~0ull >> op.mbe64);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::RLDCR(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.ra] = rol64(ppu.GPR[op.rs], ppu.GPR[op.rb] & 0x3f) & ppu_rotate_mask(0, op.mbe64);
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
ppu.GPR[op.ra] = rol64(ppu.GPR[op.rs], ppu.GPR[op.rb] & 0x3f) & (~0ull << (op.mbe64 ^ 63));
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
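RLDICL/RLDCL and RLDICR/RLDCR above drop ppu_rotate_mask() in favour of direct shifts; the two forms are equivalent because, in PPC bit numbering (bit 0 = MSB), a mask over bits mb..63 is just the low 64 - mb bits and a mask over bits 0..me is the high me + 1 bits:

static_assert((~0ull >> 5) == 0x07ffffffffffffffull, "bits 5..63: low 59 bits (mb = 5)");
static_assert((~0ull << (5 ^ 63)) == 0xfc00000000000000ull, "bits 0..5: high 6 bits (me = 5)");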
void ppu_interpreter::CMP(PPUThread& ppu, ppu_opcode_t op)
@ -1801,14 +1800,14 @@ void ppu_interpreter::SUBFC(PPUThread& ppu, ppu_opcode_t op)
const auto r = add64_flags(~RA, RB, 1);
ppu.GPR[op.rd] = r.result;
ppu.CA = r.carry;
if (op.oe) ppu.SetOV((~RA >> 63 == RB >> 63) && (~RA >> 63 != ppu.GPR[op.rd] >> 63));
if (op.rc) ppu.SetCR<s64>(0, r.result, 0);
if (UNLIKELY(op.oe)) ppu.SetOV((~RA >> 63 == RB >> 63) && (~RA >> 63 != ppu.GPR[op.rd] >> 63));
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, r.result, 0);
}
void ppu_interpreter::MULHDU(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.rd] = UMULH64(ppu.GPR[op.ra], ppu.GPR[op.rb]);
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.rd], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.rd], 0);
}
void ppu_interpreter::ADDC(PPUThread& ppu, ppu_opcode_t op)
@ -1818,8 +1817,8 @@ void ppu_interpreter::ADDC(PPUThread& ppu, ppu_opcode_t op)
const auto r = add64_flags(RA, RB);
ppu.GPR[op.rd] = r.result;
ppu.CA = r.carry;
if (op.oe) ppu.SetOV((RA >> 63 == RB >> 63) && (RA >> 63 != ppu.GPR[op.rd] >> 63));
if (op.rc) ppu.SetCR<s64>(0, r.result, 0);
if (UNLIKELY(op.oe)) ppu.SetOV((RA >> 63 == RB >> 63) && (RA >> 63 != ppu.GPR[op.rd] >> 63));
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, r.result, 0);
}
void ppu_interpreter::MULHWU(PPUThread& ppu, ppu_opcode_t op)
@ -1827,7 +1826,7 @@ void ppu_interpreter::MULHWU(PPUThread& ppu, ppu_opcode_t op)
u32 a = (u32)ppu.GPR[op.ra];
u32 b = (u32)ppu.GPR[op.rb];
ppu.GPR[op.rd] = ((u64)a * (u64)b) >> 32;
if (op.rc) ppu.SetCR(0, false, false, false, ppu.SO);
if (UNLIKELY(op.rc)) ppu.SetCR(0, false, false, false, ppu.SO);
}
void ppu_interpreter::MFOCRF(PPUThread& ppu, ppu_opcode_t op)
@ -1877,30 +1876,26 @@ void ppu_interpreter::LWZX(PPUThread& ppu, ppu_opcode_t op)
void ppu_interpreter::SLW(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.ra] = u32(ppu.GPR[op.rs] << (ppu.GPR[op.rb] & 0x3f));
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::CNTLZW(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.ra] = cntlz32(u32(ppu.GPR[op.rs]));
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::SLD(PPUThread& ppu, ppu_opcode_t op)
{
u32 n = ppu.GPR[op.rb] & 0x3f;
u64 r = rol64(ppu.GPR[op.rs], n);
u64 m = (ppu.GPR[op.rb] & 0x40) ? 0 : ppu_rotate_mask(0, 63 - n);
ppu.GPR[op.ra] = r & m;
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
const u32 n = ppu.GPR[op.rb];
ppu.GPR[op.ra] = UNLIKELY(n & 0x40) ? 0 : ppu.GPR[op.rs] << n;
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::AND(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.ra] = ppu.GPR[op.rs] & ppu.GPR[op.rb];
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::CMPL(PPUThread& ppu, ppu_opcode_t op)
@ -1954,8 +1949,8 @@ void ppu_interpreter::SUBF(PPUThread& ppu, ppu_opcode_t op)
const u64 RA = ppu.GPR[op.ra];
const u64 RB = ppu.GPR[op.rb];
ppu.GPR[op.rd] = RB - RA;
if (op.oe) ppu.SetOV((~RA >> 63 == RB >> 63) && (~RA >> 63 != ppu.GPR[op.rd] >> 63));
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.rd], 0);
if (UNLIKELY(op.oe)) ppu.SetOV((~RA >> 63 == RB >> 63) && (~RA >> 63 != ppu.GPR[op.rd] >> 63));
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.rd], 0);
}
void ppu_interpreter::LDUX(PPUThread& ppu, ppu_opcode_t op)
@ -1979,13 +1974,13 @@ void ppu_interpreter::LWZUX(PPUThread& ppu, ppu_opcode_t op)
void ppu_interpreter::CNTLZD(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.ra] = cntlz64(ppu.GPR[op.rs]);
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::ANDC(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.ra] = ppu.GPR[op.rs] & ~ppu.GPR[op.rb];
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::TD(PPUThread& ppu, ppu_opcode_t op)
@ -2012,7 +2007,7 @@ void ppu_interpreter::LVEWX(PPUThread& ppu, ppu_opcode_t op)
void ppu_interpreter::MULHD(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.rd] = MULH64(ppu.GPR[op.ra], ppu.GPR[op.rb]);
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.rd], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.rd], 0);
}
void ppu_interpreter::MULHW(PPUThread& ppu, ppu_opcode_t op)
@ -2020,7 +2015,7 @@ void ppu_interpreter::MULHW(PPUThread& ppu, ppu_opcode_t op)
s32 a = (s32)ppu.GPR[op.ra];
s32 b = (s32)ppu.GPR[op.rb];
ppu.GPR[op.rd] = ((s64)a * (s64)b) >> 32;
if (op.rc) ppu.SetCR(0, false, false, false, ppu.SO);
if (UNLIKELY(op.rc)) ppu.SetCR(0, false, false, false, ppu.SO);
}
void ppu_interpreter::LDARX(PPUThread& ppu, ppu_opcode_t op)
@ -2053,8 +2048,8 @@ void ppu_interpreter::NEG(PPUThread& ppu, ppu_opcode_t op)
{
const u64 RA = ppu.GPR[op.ra];
ppu.GPR[op.rd] = 0 - RA;
if (op.oe) ppu.SetOV((~RA >> 63 == 0) && (~RA >> 63 != ppu.GPR[op.rd] >> 63));
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.rd], 0);
if (UNLIKELY(op.oe)) ppu.SetOV((~RA >> 63 == 0) && (~RA >> 63 != ppu.GPR[op.rd] >> 63));
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.rd], 0);
}
void ppu_interpreter::LBZUX(PPUThread& ppu, ppu_opcode_t op)
@ -2067,7 +2062,7 @@ void ppu_interpreter::LBZUX(PPUThread& ppu, ppu_opcode_t op)
void ppu_interpreter::NOR(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.ra] = ~(ppu.GPR[op.rs] | ppu.GPR[op.rb]);
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::STVEBX(PPUThread& ppu, ppu_opcode_t op)
@ -2084,8 +2079,8 @@ void ppu_interpreter::SUBFE(PPUThread& ppu, ppu_opcode_t op)
const auto r = add64_flags(~RA, RB, ppu.CA);
ppu.GPR[op.rd] = r.result;
ppu.CA = r.carry;
if (op.oe) ppu.SetOV((~RA >> 63 == RB >> 63) && (~RA >> 63 != ppu.GPR[op.rd] >> 63));
if (op.rc) ppu.SetCR<s64>(0, r.result, 0);
if (UNLIKELY(op.oe)) ppu.SetOV((~RA >> 63 == RB >> 63) && (~RA >> 63 != ppu.GPR[op.rd] >> 63));
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, r.result, 0);
}
void ppu_interpreter::ADDE(PPUThread& ppu, ppu_opcode_t op)
@ -2095,8 +2090,8 @@ void ppu_interpreter::ADDE(PPUThread& ppu, ppu_opcode_t op)
const auto r = add64_flags(RA, RB, ppu.CA);
ppu.GPR[op.rd] = r.result;
ppu.CA = r.carry;
if (op.oe) ppu.SetOV((RA >> 63 == RB >> 63) && (RA >> 63 != ppu.GPR[op.rd] >> 63));
if (op.rc) ppu.SetCR<s64>(0, r.result, 0);
if (UNLIKELY(op.oe)) ppu.SetOV((RA >> 63 == RB >> 63) && (RA >> 63 != ppu.GPR[op.rd] >> 63));
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, r.result, 0);
}
void ppu_interpreter::MTOCRF(PPUThread& ppu, ppu_opcode_t op)
@ -2189,8 +2184,8 @@ void ppu_interpreter::SUBFZE(PPUThread& ppu, ppu_opcode_t op)
const auto r = add64_flags(~RA, 0, ppu.CA);
ppu.GPR[op.rd] = r.result;
ppu.CA = r.carry;
if (op.oe) ppu.SetOV((~RA >> 63 == 0) && (~RA >> 63 != ppu.GPR[op.rd] >> 63));
if (op.rc) ppu.SetCR<s64>(0, r.result, 0);
if (UNLIKELY(op.oe)) ppu.SetOV((~RA >> 63 == 0) && (~RA >> 63 != ppu.GPR[op.rd] >> 63));
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, r.result, 0);
}
void ppu_interpreter::ADDZE(PPUThread& ppu, ppu_opcode_t op)
@ -2199,8 +2194,8 @@ void ppu_interpreter::ADDZE(PPUThread& ppu, ppu_opcode_t op)
const auto r = add64_flags(RA, 0, ppu.CA);
ppu.GPR[op.rd] = r.result;
ppu.CA = r.carry;
if (op.oe) ppu.SetOV((RA >> 63 == 0) && (RA >> 63 != ppu.GPR[op.rd] >> 63));
if (op.rc) ppu.SetCR<s64>(0, r.result, 0);
if (UNLIKELY(op.oe)) ppu.SetOV((RA >> 63 == 0) && (RA >> 63 != ppu.GPR[op.rd] >> 63));
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, r.result, 0);
}
void ppu_interpreter::STDCX(PPUThread& ppu, ppu_opcode_t op)
@ -2228,12 +2223,12 @@ void ppu_interpreter::MULLD(PPUThread& ppu, ppu_opcode_t op)
const s64 RA = ppu.GPR[op.ra];
const s64 RB = ppu.GPR[op.rb];
ppu.GPR[op.rd] = (s64)(RA * RB);
if (op.oe)
if (UNLIKELY(op.oe))
{
const s64 high = MULH64(RA, RB);
ppu.SetOV(high != s64(ppu.GPR[op.rd]) >> 63);
}
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.rd], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.rd], 0);
}
void ppu_interpreter::SUBFME(PPUThread& ppu, ppu_opcode_t op)
@ -2242,8 +2237,8 @@ void ppu_interpreter::SUBFME(PPUThread& ppu, ppu_opcode_t op)
const auto r = add64_flags(~RA, ~0ull, ppu.CA);
ppu.GPR[op.rd] = r.result;
ppu.CA = r.carry;
if (op.oe) ppu.SetOV((~RA >> 63 == 1) && (~RA >> 63 != ppu.GPR[op.rd] >> 63));
if (op.rc) ppu.SetCR<s64>(0, r.result, 0);
if (UNLIKELY(op.oe)) ppu.SetOV((~RA >> 63 == 1) && (~RA >> 63 != ppu.GPR[op.rd] >> 63));
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, r.result, 0);
}
void ppu_interpreter::ADDME(PPUThread& ppu, ppu_opcode_t op)
@ -2252,15 +2247,15 @@ void ppu_interpreter::ADDME(PPUThread& ppu, ppu_opcode_t op)
const auto r = add64_flags(RA, ~0ull, ppu.CA);
ppu.GPR[op.rd] = r.result;
ppu.CA = r.carry;
if (op.oe) ppu.SetOV((u64(RA) >> 63 == 1) && (u64(RA) >> 63 != ppu.GPR[op.rd] >> 63));
if (op.rc) ppu.SetCR<s64>(0, r.result, 0);
if (UNLIKELY(op.oe)) ppu.SetOV((u64(RA) >> 63 == 1) && (u64(RA) >> 63 != ppu.GPR[op.rd] >> 63));
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, r.result, 0);
}
void ppu_interpreter::MULLW(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.rd] = (s64)((s64)(s32)ppu.GPR[op.ra] * (s64)(s32)ppu.GPR[op.rb]);
if (op.oe) ppu.SetOV(s64(ppu.GPR[op.rd]) < s64(-1) << 31 || s64(ppu.GPR[op.rd]) >= s64(1) << 31);
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.oe)) ppu.SetOV(s64(ppu.GPR[op.rd]) < s64(-1) << 31 || s64(ppu.GPR[op.rd]) >= s64(1) << 31);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::DCBTST(PPUThread& ppu, ppu_opcode_t op)
@ -2279,8 +2274,8 @@ void ppu_interpreter::ADD(PPUThread& ppu, ppu_opcode_t op)
const u64 RA = ppu.GPR[op.ra];
const u64 RB = ppu.GPR[op.rb];
ppu.GPR[op.rd] = RA + RB;
if (op.oe) ppu.SetOV((RA >> 63 == RB >> 63) && (RA >> 63 != ppu.GPR[op.rd] >> 63));
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.rd], 0);
if (UNLIKELY(op.oe)) ppu.SetOV((RA >> 63 == RB >> 63) && (RA >> 63 != ppu.GPR[op.rd] >> 63));
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.rd], 0);
}
void ppu_interpreter::DCBT(PPUThread& ppu, ppu_opcode_t op)
@ -2296,7 +2291,7 @@ void ppu_interpreter::LHZX(PPUThread& ppu, ppu_opcode_t op)
void ppu_interpreter::EQV(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.ra] = ~(ppu.GPR[op.rs] ^ ppu.GPR[op.rb]);
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::ECIWX(PPUThread& ppu, ppu_opcode_t op)
@ -2314,7 +2309,7 @@ void ppu_interpreter::LHZUX(PPUThread& ppu, ppu_opcode_t op)
void ppu_interpreter::XOR(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.ra] = ppu.GPR[op.rs] ^ ppu.GPR[op.rb];
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::MFSPR(PPUThread& ppu, ppu_opcode_t op)
@ -2396,7 +2391,7 @@ void ppu_interpreter::STHX(PPUThread& ppu, ppu_opcode_t op)
void ppu_interpreter::ORC(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.ra] = ppu.GPR[op.rs] | ~ppu.GPR[op.rb];
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::ECOWX(PPUThread& ppu, ppu_opcode_t op)
@ -2414,7 +2409,7 @@ void ppu_interpreter::STHUX(PPUThread& ppu, ppu_opcode_t op)
void ppu_interpreter::OR(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.ra] = ppu.GPR[op.rs] | ppu.GPR[op.rb];
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::DIVDU(PPUThread& ppu, ppu_opcode_t op)
@ -2422,8 +2417,8 @@ void ppu_interpreter::DIVDU(PPUThread& ppu, ppu_opcode_t op)
const u64 RA = ppu.GPR[op.ra];
const u64 RB = ppu.GPR[op.rb];
ppu.GPR[op.rd] = RB == 0 ? 0 : RA / RB;
if (op.oe) ppu.SetOV(RB == 0);
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.rd], 0);
if (UNLIKELY(op.oe)) ppu.SetOV(RB == 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.rd], 0);
}
void ppu_interpreter::DIVWU(PPUThread& ppu, ppu_opcode_t op)
@ -2431,8 +2426,8 @@ void ppu_interpreter::DIVWU(PPUThread& ppu, ppu_opcode_t op)
const u32 RA = (u32)ppu.GPR[op.ra];
const u32 RB = (u32)ppu.GPR[op.rb];
ppu.GPR[op.rd] = RB == 0 ? 0 : RA / RB;
if (op.oe) ppu.SetOV(RB == 0);
if (op.rc) ppu.SetCR(0, false, false, false, ppu.SO);
if (UNLIKELY(op.oe)) ppu.SetOV(RB == 0);
if (UNLIKELY(op.rc)) ppu.SetCR(0, false, false, false, ppu.SO);
}
void ppu_interpreter::MTSPR(PPUThread& ppu, ppu_opcode_t op)
@ -2465,7 +2460,7 @@ void ppu_interpreter::DCBI(PPUThread& ppu, ppu_opcode_t op)
void ppu_interpreter::NAND(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.ra] = ~(ppu.GPR[op.rs] & ppu.GPR[op.rb]);
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::STVXL(PPUThread& ppu, ppu_opcode_t op)
@ -2480,8 +2475,8 @@ void ppu_interpreter::DIVD(PPUThread& ppu, ppu_opcode_t op)
const s64 RB = ppu.GPR[op.rb];
const bool o = RB == 0 || ((u64)RA == (1ULL << 63) && RB == -1);
ppu.GPR[op.rd] = o ? 0 : RA / RB;
if (op.oe) ppu.SetOV(o);
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.rd], 0);
if (UNLIKELY(op.oe)) ppu.SetOV(o);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.rd], 0);
}
void ppu_interpreter::DIVW(PPUThread& ppu, ppu_opcode_t op)
@ -2490,8 +2485,8 @@ void ppu_interpreter::DIVW(PPUThread& ppu, ppu_opcode_t op)
const s32 RB = (s32)ppu.GPR[op.rb];
const bool o = RB == 0 || ((u32)RA == (1 << 31) && RB == -1);
ppu.GPR[op.rd] = o ? 0 : u32(RA / RB);
if (op.oe) ppu.SetOV(o);
if (op.rc) ppu.SetCR(0, false, false, false, ppu.SO);
if (UNLIKELY(op.oe)) ppu.SetOV(o);
if (UNLIKELY(op.rc)) ppu.SetCR(0, false, false, false, ppu.SO);
}
void ppu_interpreter::LVLX(PPUThread& ppu, ppu_opcode_t op)
@ -2544,17 +2539,14 @@ void ppu_interpreter::LFSX(PPUThread& ppu, ppu_opcode_t op)
void ppu_interpreter::SRW(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.ra] = (ppu.GPR[op.rs] & 0xffffffff) >> (ppu.GPR[op.rb] & 0x3f);
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::SRD(PPUThread& ppu, ppu_opcode_t op)
{
u32 n = ppu.GPR[op.rb] & 0x3f;
u64 r = rol64(ppu.GPR[op.rs], 64 - n);
u64 m = (ppu.GPR[op.rb] & 0x40) ? 0 : ppu_rotate_mask(n, 63);
ppu.GPR[op.ra] = r & m;
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
const u32 n = ppu.GPR[op.rb];
ppu.GPR[op.ra] = UNLIKELY(n & 0x40) ? 0 : ppu.GPR[op.rs] >> n;
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::LVRX(PPUThread& ppu, ppu_opcode_t op)
@ -2754,7 +2746,7 @@ void ppu_interpreter::SRAW(PPUThread& ppu, ppu_opcode_t op)
ppu.CA = (RS < 0) && ((ppu.GPR[op.ra] << shift) != RS);
}
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::SRAD(PPUThread& ppu, ppu_opcode_t op)
@ -2772,7 +2764,7 @@ void ppu_interpreter::SRAD(PPUThread& ppu, ppu_opcode_t op)
ppu.CA = (RS < 0) && ((ppu.GPR[op.ra] << shift) != RS);
}
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::LVRXL(PPUThread& ppu, ppu_opcode_t op)
@ -2794,7 +2786,7 @@ void ppu_interpreter::SRAWI(PPUThread& ppu, ppu_opcode_t op)
ppu.GPR[op.ra] = RS >> op.sh32;
ppu.CA = (RS < 0) && ((u32)(ppu.GPR[op.ra] << op.sh32) != RS);
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::SRADI(PPUThread& ppu, ppu_opcode_t op)
@ -2804,7 +2796,7 @@ void ppu_interpreter::SRADI(PPUThread& ppu, ppu_opcode_t op)
ppu.GPR[op.ra] = RS >> sh;
ppu.CA = (RS < 0) && ((ppu.GPR[op.ra] << sh) != RS);
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::EIEIO(PPUThread& ppu, ppu_opcode_t op)
@ -2829,7 +2821,7 @@ void ppu_interpreter::STHBRX(PPUThread& ppu, ppu_opcode_t op)
void ppu_interpreter::EXTSH(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.ra] = (s64)(s16)ppu.GPR[op.rs];
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::STVRXL(PPUThread& ppu, ppu_opcode_t op)
@ -2843,7 +2835,7 @@ void ppu_interpreter::STVRXL(PPUThread& ppu, ppu_opcode_t op)
void ppu_interpreter::EXTSB(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.ra] = (s64)(s8)ppu.GPR[op.rs];
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::STFIWX(PPUThread& ppu, ppu_opcode_t op)
@ -2855,7 +2847,7 @@ void ppu_interpreter::STFIWX(PPUThread& ppu, ppu_opcode_t op)
void ppu_interpreter::EXTSW(PPUThread& ppu, ppu_opcode_t op)
{
ppu.GPR[op.ra] = (s64)(s32)ppu.GPR[op.rs];
if (op.rc) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
if (UNLIKELY(op.rc)) ppu.SetCR<s64>(0, ppu.GPR[op.ra], 0);
}
void ppu_interpreter::ICBI(PPUThread& ppu, ppu_opcode_t op)
@ -3049,66 +3041,6 @@ void ppu_interpreter::LWA(PPUThread& ppu, ppu_opcode_t op)
ppu.GPR[op.rd] = (s64)(s32)vm::read32(vm::cast(addr, HERE));
}
void ppu_interpreter::FDIVS(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = f32(ppu.FPR[op.fra] / ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FSUBS(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = f32(ppu.FPR[op.fra] - ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FADDS(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = f32(ppu.FPR[op.fra] + ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FSQRTS(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = f32(sqrt(ppu.FPR[op.frb]));
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FRES(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = f32(1.0 / ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FMULS(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = f32(ppu.FPR[op.fra] * ppu.FPR[op.frc]);
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FMADDS(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = f32(ppu.FPR[op.fra] * ppu.FPR[op.frc] + ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FMSUBS(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = f32(ppu.FPR[op.fra] * ppu.FPR[op.frc] - ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FNMSUBS(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = f32(-(ppu.FPR[op.fra] * ppu.FPR[op.frc]) + ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FNMADDS(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = f32(-(ppu.FPR[op.fra] * ppu.FPR[op.frc]) - ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::STD(PPUThread& ppu, ppu_opcode_t op)
{
const u64 addr = (op.simm16 & ~3) + (op.ra ? ppu.GPR[op.ra] : 0);
@ -3122,10 +3054,70 @@ void ppu_interpreter::STDU(PPUThread& ppu, ppu_opcode_t op)
ppu.GPR[op.ra] = addr;
}
void ppu_interpreter::FDIVS(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = f32(ppu.FPR[op.fra] / ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FSUBS(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = f32(ppu.FPR[op.fra] - ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FADDS(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = f32(ppu.FPR[op.fra] + ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FSQRTS(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = f32(sqrt(ppu.FPR[op.frb]));
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FRES(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = f32(1.0 / ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FMULS(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = f32(ppu.FPR[op.fra] * ppu.FPR[op.frc]);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FMADDS(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = f32(ppu.FPR[op.fra] * ppu.FPR[op.frc] + ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FMSUBS(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = f32(ppu.FPR[op.fra] * ppu.FPR[op.frc] - ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FNMSUBS(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = f32(-(ppu.FPR[op.fra] * ppu.FPR[op.frc]) + ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FNMADDS(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = f32(-(ppu.FPR[op.fra] * ppu.FPR[op.frc]) - ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::MTFSB1(PPUThread& ppu, ppu_opcode_t op)
{
LOG_WARNING(PPU, "MTFSB1");
if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::MCRFS(PPUThread& ppu, ppu_opcode_t op)
@ -3137,26 +3129,26 @@ void ppu_interpreter::MCRFS(PPUThread& ppu, ppu_opcode_t op)
void ppu_interpreter::MTFSB0(PPUThread& ppu, ppu_opcode_t op)
{
LOG_WARNING(PPU, "MTFSB0");
if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::MTFSFI(PPUThread& ppu, ppu_opcode_t op)
{
LOG_WARNING(PPU, "MTFSFI");
if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::MFFS(PPUThread& ppu, ppu_opcode_t op)
{
LOG_WARNING(PPU, "MFFS");
ppu.FPR[op.frd] = 0.0;
if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::MTFSF(PPUThread& ppu, ppu_opcode_t op)
{
LOG_WARNING(PPU, "MTFSF");
if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FCMPU(PPUThread& ppu, ppu_opcode_t op)
@ -3173,85 +3165,85 @@ void ppu_interpreter::FCMPU(PPUThread& ppu, ppu_opcode_t op)
void ppu_interpreter::FRSP(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = f32(ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FCTIW(PPUThread& ppu, ppu_opcode_t op)
{
(s32&)ppu.FPR[op.frd] = lrint(ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FCTIWZ(PPUThread& ppu, ppu_opcode_t op)
{
(s32&)ppu.FPR[op.frd] = static_cast<s32>(ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FDIV(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = ppu.FPR[op.fra] / ppu.FPR[op.frb];
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FSUB(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = ppu.FPR[op.fra] - ppu.FPR[op.frb];
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FADD(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = ppu.FPR[op.fra] + ppu.FPR[op.frb];
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FSQRT(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = sqrt(ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FSEL(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = ppu.FPR[op.fra] >= 0.0 ? ppu.FPR[op.frc] : ppu.FPR[op.frb];
if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FMUL(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = ppu.FPR[op.fra] * ppu.FPR[op.frc];
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FRSQRTE(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = 1.0 / sqrt(ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FMSUB(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = ppu.FPR[op.fra] * ppu.FPR[op.frc] - ppu.FPR[op.frb];
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FMADD(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = ppu.FPR[op.fra] * ppu.FPR[op.frc] + ppu.FPR[op.frb];
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FNMSUB(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = -(ppu.FPR[op.fra] * ppu.FPR[op.frc]) + ppu.FPR[op.frb];
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FNMADD(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = -(ppu.FPR[op.fra] * ppu.FPR[op.frc]) - ppu.FPR[op.frb];
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FCMPO(PPUThread& ppu, ppu_opcode_t op)
@ -3262,43 +3254,43 @@ void ppu_interpreter::FCMPO(PPUThread& ppu, ppu_opcode_t op)
void ppu_interpreter::FNEG(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = -ppu.FPR[op.frb];
if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FMR(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = ppu.FPR[op.frb];
if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FNABS(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = -fabs(ppu.FPR[op.frb]);
if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FABS(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = fabs(ppu.FPR[op.frb]);
if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FCTID(PPUThread& ppu, ppu_opcode_t op)
{
(s64&)ppu.FPR[op.frd] = llrint(ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FCTIDZ(PPUThread& ppu, ppu_opcode_t op)
{
(s64&)ppu.FPR[op.frd] = static_cast<s64>(ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
void ppu_interpreter::FCFID(PPUThread& ppu, ppu_opcode_t op)
{
ppu.FPR[op.frd] = static_cast<double>((s64&)ppu.FPR[op.frb]);
ASSERT(!op.rc); //if (op.rc) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
ASSERT(!op.rc); //if (UNLIKELY(op.rc)) ppu.SetCR(1, ppu.FG, ppu.FL, ppu.FE, ppu.FU);
}
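For reference, the LIKELY/UNLIKELY hints sprinkled through the interpreter above are conventionally thin wrappers over the compiler's branch-prediction intrinsic. A minimal sketch of such macros (an illustrative assumption, not the definitions this commit relies on):

#if defined(__GNUC__) || defined(__clang__)
#define LIKELY(x)   __builtin_expect(!!(x), 1)  // hint: condition is almost always true
#define UNLIKELY(x) __builtin_expect(!!(x), 0)  // hint: condition is almost always false
#else
#define LIKELY(x)   (x)   // no portable MSVC equivalent; the hint degrades to a no-op
#define UNLIKELY(x) (x)
#endif

On the rc/oe checks above this keeps the common path (no CR/OV update) on the fall-through side of the branch.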

View file

@ -108,15 +108,15 @@ void PPUThread::cpu_task()
while (true)
{
if (_pc == PC && !state.load())
if (LIKELY(_pc == PC && !state.load()))
{
func0(*this, { op0 });
if ((_pc += 4) == (PC += 4) && !state.load())
if (LIKELY((_pc += 4) == (PC += 4) && !state.load()))
{
func1(*this, { op1 });
if ((_pc += 4) == (PC += 4))
if (LIKELY((_pc += 4) == (PC += 4)))
{
op0 = op2;
func0 = func2;
@ -135,7 +135,7 @@ void PPUThread::cpu_task()
func1 = table[ppu_decode(op1 = ops[1])];
func2 = table[ppu_decode(op2 = ops[2])];
if (check_status()) return;
if (UNLIKELY(check_status())) return;
}
}
@ -152,6 +152,11 @@ PPUThread::~PPUThread()
}
}
PPUThread::PPUThread(const std::string& name)
: cpu_thread(cpu_type::ppu, name)
{
}
be_t<u64>* PPUThread::get_stack_arg(s32 i, u64 align)
{
if (align != 1 && align != 2 && align != 4 && align != 8 && align != 16) throw fmt::exception("Unsupported alignment: 0x%llx" HERE, align);

View file

@ -12,14 +12,10 @@ public:
virtual void cpu_init() override;
virtual void cpu_task() override;
virtual bool handle_interrupt() override;
PPUThread(const std::string& name)
: cpu_thread(cpu_type::ppu, name)
{
}
virtual ~PPUThread() override;
PPUThread(const std::string& name);
u64 GPR[32]{}; // General-Purpose Registers
f64 FPR[32]{}; // Floating Point Registers
v128 VR[32]{}; // Vector Registers

View file

@ -24,6 +24,24 @@ void RawSPUThread::cpu_task()
npc = pc | ((ch_event_stat & SPU_EVENT_INTR_ENABLED) != 0);
}
void RawSPUThread::on_init()
{
if (!offset)
{
// Install correct SPU index and LS address
const_cast<u32&>(index) = id;
const_cast<u32&>(offset) = vm::falloc(RAW_SPU_BASE_ADDR + RAW_SPU_OFFSET * index, 0x40000);
ASSERT(offset);
SPUThread::on_init();
}
}
RawSPUThread::RawSPUThread(const std::string& name)
: SPUThread(name)
{
}
bool RawSPUThread::read_reg(const u32 addr, u32& value)
{
const u32 offset = addr - RAW_SPU_BASE_ADDR - index * RAW_SPU_OFFSET - RAW_SPU_PROB_OFFSET;
@ -83,7 +101,7 @@ bool RawSPUThread::write_reg(const u32 addr, const u32 value)
}))
{
state -= cpu_state::stop;
safe_notify();
lock_notify();
}
};
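The safe_notify -> lock_notify rename seen here recurs through the rest of the commit. As a rough mental model (assumed behaviour, not the actual thread_ctrl code in Utilities/Thread.h), notify() only signals the thread's condition variable, while lock_notify() briefly takes the thread's mutex first so a waiter sitting between its predicate check and wait() cannot miss the wakeup:

#include <mutex>
#include <condition_variable>

// Hypothetical illustration of the two notification flavours.
void notify_model(std::condition_variable& cv)
{
    cv.notify_one();
}

void lock_notify_model(std::mutex& m, std::condition_variable& cv)
{
    std::lock_guard<std::mutex> lock(m); // serialize with the waiter's predicate check
    cv.notify_one();
}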

View file

@ -14,23 +14,9 @@ public:
static constexpr u32 id_min = 0;
static constexpr u32 id_max = 4;
void on_init() override
{
if (!offset)
{
// Install correct SPU index and LS address
const_cast<u32&>(index) = id;
const_cast<u32&>(offset) = vm::falloc(RAW_SPU_BASE_ADDR + RAW_SPU_OFFSET * index, 0x40000);
ASSERT(offset);
void on_init() override;
SPUThread::on_init();
}
}
RawSPUThread(const std::string& name)
: SPUThread(name)
{
}
RawSPUThread(const std::string& name);
bool read_reg(const u32 addr, u32& value);
bool write_reg(const u32 addr, const u32 value);

View file

@ -58,7 +58,7 @@ std::shared_ptr<spu_function_t> SPUDatabase::analyse(const be_t<u32>* ls, u32 en
}
}
std::lock_guard<shared_mutex> lock(m_mutex);
writer_lock lock(m_mutex);
// Double-check
if (auto func = find(ls + entry / 4, key, max_limit - entry))

View file

@ -16,6 +16,8 @@
#include "Emu/Cell/SPUInterpreter.h"
#include "Emu/Cell/SPURecompiler.h"
#include "Emu/Memory/wait_engine.h"
#include <cfenv>
extern u64 get_timebased_time();
@ -52,8 +54,7 @@ void spu_int_ctrl_t::set(u64 ints)
if (tag && tag->handler)
{
tag->handler->signal++;
tag->handler->thread->cv.notify_one();
tag->handler->thread->notify();
}
}
}
@ -167,7 +168,20 @@ void SPUThread::cpu_task()
}
}
SPUThread::SPUThread(const std::string & name, u32 index)
SPUThread::~SPUThread()
{
// Deallocate Local Storage
vm::dealloc_verbose_nothrow(offset);
}
SPUThread::SPUThread(const std::string& name)
: cpu_thread(cpu_type::spu, name)
, index(0)
, offset(0)
{
}
SPUThread::SPUThread(const std::string& name, u32 index)
: cpu_thread(cpu_type::spu, name)
, index(index)
, offset(vm::alloc(0x40000, vm::main))
@ -175,12 +189,6 @@ SPUThread::SPUThread(const std::string & name, u32 index)
Ensures(offset);
}
SPUThread::~SPUThread()
{
// Deallocate Local Storage
vm::dealloc_verbose_nothrow(offset);
}
void SPUThread::push_snr(u32 number, u32 value)
{
// Get channel
@ -430,7 +438,7 @@ void SPUThread::process_mfc_cmd(u32 cmd)
u32 SPUThread::get_events(bool waiting)
{
// check reservation status and set SPU_EVENT_LR if lost
if (last_raddr != 0 && !vm::reservation_test(get_thread_ctrl()))
if (last_raddr != 0 && !vm::reservation_test(operator->()))
{
ch_event_stat |= SPU_EVENT_LR;
@ -473,11 +481,11 @@ void SPUThread::set_events(u32 mask)
// notify if some events were set
if (~old_stat & mask && old_stat & SPU_EVENT_WAITING)
{
std::lock_guard<std::mutex> lock(mutex);
std::lock_guard<std::mutex> lock(get_current_thread_mutex());
if (ch_event_stat & SPU_EVENT_WAITING)
{
cv.notify_one();
notify();
}
}
}
@ -530,7 +538,7 @@ bool SPUThread::get_ch_value(u32 ch, u32& out)
auto read_channel = [&](spu_channel_t& channel)
{
std::unique_lock<std::mutex> lock(mutex, std::defer_lock);
std::unique_lock<std::mutex> lock(get_current_thread_mutex(), std::defer_lock);
while (true)
{
@ -552,7 +560,7 @@ bool SPUThread::get_ch_value(u32 ch, u32& out)
continue;
}
cv.wait(lock);
get_current_thread_cv().wait(lock);
}
};
@ -563,7 +571,7 @@ bool SPUThread::get_ch_value(u32 ch, u32& out)
// break;
case SPU_RdInMbox:
{
std::unique_lock<std::mutex> lock(mutex, std::defer_lock);
std::unique_lock<std::mutex> lock(get_current_thread_mutex(), std::defer_lock);
while (true)
{
@ -590,7 +598,7 @@ bool SPUThread::get_ch_value(u32 ch, u32& out)
continue;
}
cv.wait(lock);
get_current_thread_cv().wait(lock);
}
}
@ -639,7 +647,7 @@ bool SPUThread::get_ch_value(u32 ch, u32& out)
case SPU_RdEventStat:
{
std::unique_lock<std::mutex> lock(mutex, std::defer_lock);
std::unique_lock<std::mutex> lock(get_current_thread_mutex(), std::defer_lock);
// start waiting or return immediately
if (u32 res = get_events(true))
@ -651,7 +659,7 @@ bool SPUThread::get_ch_value(u32 ch, u32& out)
if (ch_event_mask & SPU_EVENT_LR)
{
// register waiter if polling reservation status is required
vm::wait_op(*this, last_raddr, 128, WRAP_EXPR(get_events(true) || state & cpu_state::stop));
vm::wait_op(last_raddr, 128, WRAP_EXPR(get_events(true) || state & cpu_state::stop));
}
else
{
@ -662,7 +670,7 @@ bool SPUThread::get_ch_value(u32 ch, u32& out)
{
CHECK_EMU_STATUS;
cv.wait(lock);
get_current_thread_cv().wait(lock);
}
}
@ -702,7 +710,7 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
{
if (offset >= RAW_SPU_BASE_ADDR)
{
std::unique_lock<std::mutex> lock(mutex, std::defer_lock);
std::unique_lock<std::mutex> lock(get_current_thread_mutex(), std::defer_lock);
while (!ch_out_intr_mbox.try_push(value))
{
@ -719,7 +727,7 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
continue;
}
cv.wait(lock);
get_current_thread_cv().wait(lock);
}
int_ctrl[2].set(SPU_INT2_STAT_MAILBOX_INT);
@ -909,7 +917,7 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
case SPU_WrOutMbox:
{
std::unique_lock<std::mutex> lock(mutex, std::defer_lock);
std::unique_lock<std::mutex> lock(get_current_thread_mutex(), std::defer_lock);
while (!ch_out_mbox.try_push(value))
{
@ -926,7 +934,7 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
continue;
}
cv.wait(lock);
get_current_thread_cv().wait(lock);
}
return true;
@ -1226,7 +1234,7 @@ bool SPUThread::stop_and_signal(u32 code)
return false;
}
cv.wait(lv2_lock);
get_current_thread_cv().wait(lv2_lock);
}
// event data must be set by push()
@ -1251,7 +1259,7 @@ bool SPUThread::stop_and_signal(u32 code)
if (thread && thread.get() != this)
{
thread->state -= cpu_state::suspend;
thread->safe_notify();
thread->lock_notify();
}
}
@ -1290,7 +1298,7 @@ bool SPUThread::stop_and_signal(u32 code)
if (thread && thread.get() != this)
{
thread->state += cpu_state::stop;
thread->safe_notify();
thread->lock_notify();
}
}
@ -1393,3 +1401,29 @@ void SPUThread::fast_call(u32 ls_addr)
gpr[1]._u32[3] = old_stack;
custom_task = std::move(old_task);
}
void SPUThread::RegisterHleFunction(u32 addr, std::function<bool(SPUThread & SPU)> function)
{
m_addr_to_hle_function_map[addr] = function;
_ref<u32>(addr) = 0x00000003; // STOP 3
}
void SPUThread::UnregisterHleFunction(u32 addr)
{
m_addr_to_hle_function_map.erase(addr);
}
void SPUThread::UnregisterHleFunctions(u32 start_addr, u32 end_addr)
{
for (auto iter = m_addr_to_hle_function_map.begin(); iter != m_addr_to_hle_function_map.end();)
{
if (iter->first >= start_addr && iter->first <= end_addr)
{
m_addr_to_hle_function_map.erase(iter++);
}
else
{
iter++;
}
}
}
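A minimal usage sketch of the HLE hooks moved out of the header above, assuming an existing SPUThread instance named spu; the LS addresses and the handler body are illustrative placeholders, not part of the commit:

// Hypothetical: trap execution at LS address 0x12340 into an HLE handler (STOP 3 sentinel).
spu.RegisterHleFunction(0x12340, [](SPUThread & SPU) -> bool
{
    // Emulate the original routine here (arguments live in SPU.gpr).
    return true;
});

// Later, drop every hook registered in an LS range.
spu.UnregisterHleFunctions(0x12000, 0x13000);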

View file

@ -180,7 +180,7 @@ public:
data.value |= value;
});
if (old.wait) spu.safe_notify();
if (old.wait) spu->lock_notify();
}
// push unconditionally (overwriting previous value), may require notification
@ -193,7 +193,7 @@ public:
data.value = value;
});
if (old.wait) spu.safe_notify();
if (old.wait) spu->lock_notify();
}
// returns true on success
@ -228,7 +228,7 @@ public:
// value is not cleared and may be read again
});
if (old.wait) spu.safe_notify();
if (old.wait) spu->lock_notify();
return old.value;
}
@ -299,7 +299,7 @@ public:
return false;
}))
{
spu.safe_notify();
spu->lock_notify();
}
}
@ -554,20 +554,14 @@ public:
virtual std::string dump() const override;
virtual void cpu_init() override;
virtual void cpu_task() override;
virtual ~SPUThread() override;
protected:
SPUThread(const std::string& name)
: cpu_thread(cpu_type::spu, name)
, index(0)
, offset(0)
{
}
SPUThread(const std::string& name);
public:
SPUThread(const std::string& name, u32 index);
virtual ~SPUThread() override;
std::array<v128, 128> gpr; // General-Purpose Registers
SPU_FPSCR fpscr;
@ -651,29 +645,7 @@ public:
return *_ptr<T>(lsa);
}
void RegisterHleFunction(u32 addr, std::function<bool(SPUThread & SPU)> function)
{
m_addr_to_hle_function_map[addr] = function;
_ref<u32>(addr) = 0x00000003; // STOP 3
}
void UnregisterHleFunction(u32 addr)
{
m_addr_to_hle_function_map.erase(addr);
}
void UnregisterHleFunctions(u32 start_addr, u32 end_addr)
{
for (auto iter = m_addr_to_hle_function_map.begin(); iter != m_addr_to_hle_function_map.end();)
{
if (iter->first >= start_addr && iter->first <= end_addr)
{
m_addr_to_hle_function_map.erase(iter++);
}
else
{
iter++;
}
}
}
void RegisterHleFunction(u32 addr, std::function<bool(SPUThread & SPU)> function);
void UnregisterHleFunction(u32 addr);
void UnregisterHleFunctions(u32 start_addr, u32 end_addr);
};

54
rpcs3/Emu/Cell/lv2/IPC.h Normal file
View file

@ -0,0 +1,54 @@
#pragma once
#include <memory>
#include <unordered_map>
#include "Utilities/SharedMutex.h"
// IPC manager for lv2 objects of type T and 64-bit IPC keys.
// External declaration of g_ipc is required.
template<typename T>
class ipc_manager final
{
std::unordered_map<u64, std::weak_ptr<T>> m_map;
shared_mutex m_mutex;
static ipc_manager g_ipc;
public:
// Add new object if specified ipc_key is not used
template<typename F>
static auto add(u64 ipc_key, F&& provider) -> decltype(static_cast<std::shared_ptr<T>>(provider()))
{
writer_lock lock(g_ipc.m_mutex);
// Get object location
std::weak_ptr<T>& wptr = g_ipc.m_map[ipc_key];
if (wptr.expired())
{
// Call a function which must return the object
std::shared_ptr<T> result = provider();
wptr = result;
return result;
}
return{};
}
// Get existing object with specified ipc_key
static std::shared_ptr<T> get(u64 ipc_key)
{
reader_lock lock(g_ipc.m_mutex);
const auto found = g_ipc.m_map.find(ipc_key);
if (found != g_ipc.m_map.end())
{
return found->second.lock();
}
return{};
}
};
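A brief usage sketch of the static ipc_manager interface defined above, patterned on how sys_event.cpp uses it further down; the lv2_event_queue_t constructor arguments are assumptions. Each instantiation also needs its g_ipc member defined once, as the template<> DECLARE(...) line in sys_event.cpp does.

// Create the queue only if the IPC key is still free.
auto queue = ipc_manager<lv2_event_queue_t>::add(ipc_key, [&]
{
    return std::make_shared<lv2_event_queue_t>(protocol, type, name, ipc_key, size);
});

if (!queue)
{
    // Key already in use: fetch the existing object instead (may be null if it expired).
    queue = ipc_manager<lv2_event_queue_t>::get(ipc_key);
}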

View file

@ -25,7 +25,7 @@ void lv2_cond_t::notify(lv2_lock_t, cpu_thread* thread)
mutex->owner = std::static_pointer_cast<cpu_thread>(thread->shared_from_this());
ASSERT(!thread->state.test_and_set(cpu_state::signal));
thread->cv.notify_one();
thread->notify();
}
}
@ -221,11 +221,11 @@ s32 sys_cond_wait(PPUThread& ppu, u32 cond_id, u64 timeout)
continue;
}
ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
get_current_thread_cv().wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
}
else
{
ppu.cv.wait(lv2_lock);
get_current_thread_cv().wait(lv2_lock);
}
}

View file

@ -9,17 +9,13 @@
#include "Emu/Cell/SPUThread.h"
#include "sys_process.h"
#include "sys_event.h"
#include "IPC.h"
LOG_CHANNEL(sys_event);
extern u64 get_system_time();
template<> DECLARE(ipc_manager<lv2_event_queue_t>::g_ipc) {};
static ipc_manager<lv2_event_queue_t>& get_ipc_manager()
{
// Use magic static
static ipc_manager<lv2_event_queue_t> instance;
return instance;
}
extern u64 get_system_time();
std::shared_ptr<lv2_event_queue_t> lv2_event_queue_t::make(u32 protocol, s32 type, u64 name, u64 ipc_key, s32 size)
{
@ -32,7 +28,7 @@ std::shared_ptr<lv2_event_queue_t> lv2_event_queue_t::make(u32 protocol, s32 typ
}
// IPC queue
return get_ipc_manager().add(ipc_key, make_expr);
return ipc_manager<lv2_event_queue_t>::add(ipc_key, make_expr);
}
std::shared_ptr<lv2_event_queue_t> lv2_event_queue_t::find(u64 ipc_key)
@ -43,7 +39,7 @@ std::shared_ptr<lv2_event_queue_t> lv2_event_queue_t::find(u64 ipc_key)
return{};
}
return get_ipc_manager().get(ipc_key);
return ipc_manager<lv2_event_queue_t>::get(ipc_key);
}
void lv2_event_queue_t::push(lv2_lock_t, u64 source, u64 data1, u64 data2, u64 data3)
@ -82,7 +78,7 @@ void lv2_event_queue_t::push(lv2_lock_t, u64 source, u64 data1, u64 data2, u64 d
}
ASSERT(!thread->state.test_and_set(cpu_state::signal));
thread->cv.notify_one();
thread->notify();
return m_sq.pop_front();
}
@ -175,7 +171,7 @@ s32 sys_event_queue_destroy(u32 equeue_id, s32 mode)
}
thread->state += cpu_state::signal;
thread->cv.notify_one();
thread->notify();
}
return CELL_OK;
@ -264,11 +260,11 @@ s32 sys_event_queue_receive(PPUThread& ppu, u32 equeue_id, vm::ptr<sys_event_t>
return CELL_ETIMEDOUT;
}
ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
get_current_thread_cv().wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
}
else
{
ppu.cv.wait(lv2_lock);
get_current_thread_cv().wait(lv2_lock);
}
}

View file

@ -29,7 +29,7 @@ void lv2_event_flag_t::notify_all(lv2_lock_t)
ppu.GPR[4] = clear_pattern(bitptn, mode);
ASSERT(!thread->state.test_and_set(cpu_state::signal));
thread->cv.notify_one();
thread->notify();
return true;
}
@ -160,11 +160,11 @@ s32 sys_event_flag_wait(PPUThread& ppu, u32 id, u64 bitptn, u32 mode, vm::ptr<u6
return CELL_ETIMEDOUT;
}
ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
get_current_thread_cv().wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
}
else
{
ppu.cv.wait(lv2_lock);
get_current_thread_cv().wait(lv2_lock);
}
}
@ -292,7 +292,7 @@ s32 sys_event_flag_cancel(u32 id, vm::ptr<u32> num)
ppu.GPR[5] = 0;
ASSERT(!thread->state.test_and_set(cpu_state::signal));
thread->cv.notify_one();
thread->notify();
}
eflag->sq.clear();

View file

@ -14,15 +14,14 @@ void lv2_int_serv_t::join(PPUThread& ppu, lv2_lock_t lv2_lock)
{
// Use is_joining to stop interrupt thread and signal
thread->is_joining = true;
thread->cv.notify_one();
thread->notify();
// Start joining
while (!(thread->state & cpu_state::exit))
{
CHECK_EMU_STATUS;
ppu.cv.wait_for(lv2_lock, std::chrono::milliseconds(1));
get_current_thread_cv().wait_for(lv2_lock, std::chrono::milliseconds(1));
}
// Cleanup
@ -122,14 +121,14 @@ s32 _sys_interrupt_thread_establish(vm::ptr<u32> ih, u32 intrtag, u32 intrthread
continue;
}
ppu.cv.wait(lv2_lock);
get_current_thread_cv().wait(lv2_lock);
}
ppu.state += cpu_state::exit;
};
it->state -= cpu_state::stop;
it->safe_notify();
it->lock_notify();
*ih = handler->id;

View file

@ -30,7 +30,7 @@ void lv2_lwcond_t::notify(lv2_lock_t, cpu_thread* thread, const std::shared_ptr<
}
ASSERT(!thread->state.test_and_set(cpu_state::signal));
thread->cv.notify_one();
thread->notify();
}
s32 _sys_lwcond_create(vm::ptr<u32> lwcond_id, u32 lwmutex_id, vm::ptr<sys_lwcond_t> control, u64 name, u32 arg5)
@ -202,11 +202,11 @@ s32 _sys_lwcond_queue_wait(PPUThread& ppu, u32 lwcond_id, u32 lwmutex_id, u64 ti
}
}
ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
get_current_thread_cv().wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
}
else
{
ppu.cv.wait(lv2_lock);
get_current_thread_cv().wait(lv2_lock);
}
}

View file

@ -23,7 +23,7 @@ void lv2_lwmutex_t::unlock(lv2_lock_t)
{
auto& thread = sq.front();
ASSERT(!thread->state.test_and_set(cpu_state::signal));
thread->cv.notify_one();
thread->notify();
sq.pop_front();
}
@ -114,11 +114,11 @@ s32 _sys_lwmutex_lock(PPUThread& ppu, u32 lwmutex_id, u64 timeout)
return CELL_ETIMEDOUT;
}
ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
get_current_thread_cv().wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
}
else
{
ppu.cv.wait(lv2_lock);
get_current_thread_cv().wait(lv2_lock);
}
}

View file

@ -22,7 +22,7 @@ void lv2_mutex_t::unlock(lv2_lock_t)
owner = std::static_pointer_cast<cpu_thread>(sq.front()->shared_from_this());
ASSERT(!owner->state.test_and_set(cpu_state::signal));
owner->cv.notify_one();
owner->notify();
}
}
@ -149,11 +149,11 @@ s32 sys_mutex_lock(PPUThread& ppu, u32 mutex_id, u64 timeout)
return CELL_ETIMEDOUT;
}
ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
get_current_thread_cv().wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
}
else
{
ppu.cv.wait(lv2_lock);
get_current_thread_cv().wait(lv2_lock);
}
}

View file

@ -6,7 +6,6 @@
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "sys_mutex.h"
#include "sys_ppu_thread.h"
LOG_CHANNEL(sys_ppu_thread);
@ -17,24 +16,25 @@ void _sys_ppu_thread_exit(PPUThread& ppu, u64 errorcode)
LV2_LOCK;
// get all sys_mutex objects
for (auto& mutex : idm::get_all<lv2_mutex_t>())
{
// unlock mutex if locked by this thread
if (mutex->owner.get() == &ppu)
{
mutex->unlock(lv2_lock);
}
}
// TODO: Should we really unlock mutexes?
//// get all sys_mutex objects
//for (auto& mutex : idm::get_all<lv2_mutex_t>())
//{
// // unlock mutex if locked by this thread
// if (mutex->owner.get() == &ppu)
// {
// mutex->unlock(lv2_lock);
// }
//}
ppu.state += cpu_state::exit;
// Delete detached thread
if (!ppu.is_joinable)
{
idm::remove<PPUThread>(ppu.id);
}
else
{
ppu.state += cpu_state::exit;
}
// Throw if this syscall was not called directly by the SC instruction (hack)
if (ppu.GPR[11] != 41 || ppu.custom_task)
@ -81,7 +81,7 @@ s32 sys_ppu_thread_join(PPUThread& ppu, u32 thread_id, vm::ptr<u64> vptr)
{
CHECK_EMU_STATUS;
ppu.cv.wait_for(lv2_lock, std::chrono::milliseconds(1));
get_current_thread_cv().wait_for(lv2_lock, std::chrono::milliseconds(1));
}
// get exit status from the register
@ -236,7 +236,7 @@ u32 ppu_thread_create(u32 entry, u64 arg, s32 prio, u32 stacksize, const std::st
ppu->GPR[3] = arg;
ppu->state -= cpu_state::stop;
ppu->safe_notify();
ppu->lock_notify();
return ppu->id;
}
@ -294,7 +294,7 @@ s32 sys_ppu_thread_start(u32 thread_id)
}
thread->state -= cpu_state::stop;
thread->safe_notify();
thread->lock_notify();
return CELL_OK;
}

View file

@ -1,6 +1,6 @@
#pragma once
namespace vm { using namespace ps3; }
#include "sys_sync.h"
class PPUThread;

View file

@ -20,7 +20,7 @@ void lv2_rwlock_t::notify_all(lv2_lock_t)
writer = std::static_pointer_cast<cpu_thread>(wsq.front()->shared_from_this());
ASSERT(!writer->state.test_and_set(cpu_state::signal));
writer->cv.notify_one();
writer->notify();
return wsq.pop_front();
}
@ -33,7 +33,7 @@ void lv2_rwlock_t::notify_all(lv2_lock_t)
for (auto& thread : rsq)
{
ASSERT(!thread->state.test_and_set(cpu_state::signal));
thread->cv.notify_one();
thread->notify();
}
return rsq.clear();
@ -132,11 +132,11 @@ s32 sys_rwlock_rlock(PPUThread& ppu, u32 rw_lock_id, u64 timeout)
return CELL_ETIMEDOUT;
}
ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
get_current_thread_cv().wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
}
else
{
ppu.cv.wait(lv2_lock);
get_current_thread_cv().wait(lv2_lock);
}
}
@ -255,11 +255,11 @@ s32 sys_rwlock_wlock(PPUThread& ppu, u32 rw_lock_id, u64 timeout)
return CELL_ETIMEDOUT;
}
ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
get_current_thread_cv().wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
}
else
{
ppu.cv.wait(lv2_lock);
get_current_thread_cv().wait(lv2_lock);
}
}

View file

@ -107,11 +107,11 @@ s32 sys_semaphore_wait(PPUThread& ppu, u32 sem_id, u64 timeout)
return CELL_ETIMEDOUT;
}
ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
get_current_thread_cv().wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
}
else
{
ppu.cv.wait(lv2_lock);
get_current_thread_cv().wait(lv2_lock);
}
}
@ -175,7 +175,7 @@ s32 sys_semaphore_post(u32 sem_id, s32 count)
auto& thread = sem->sq.front();
ASSERT(!thread->state.test_and_set(cpu_state::signal));
thread->cv.notify_one();
thread->notify();
sem->sq.pop_front();
}

View file

@ -323,7 +323,7 @@ s32 sys_spu_thread_group_start(u32 id)
if (thread)
{
thread->state -= cpu_state::stop;
thread->safe_notify();
thread->lock_notify();
}
}
@ -421,7 +421,7 @@ s32 sys_spu_thread_group_resume(u32 id)
if (thread)
{
thread->state -= cpu_state::suspend;
thread->safe_notify();
thread->lock_notify();
}
}
@ -504,7 +504,7 @@ s32 sys_spu_thread_group_terminate(u32 id, s32 value)
if (thread)
{
thread->state += cpu_state::stop;
thread->safe_notify();
thread->lock_notify();
}
}

View file

@ -1,6 +1,5 @@
#pragma once
#include "Utilities/SharedMutex.h"
#include "Utilities/SleepQueue.h"
namespace vm { using namespace ps3; }
@ -44,49 +43,8 @@ enum
SYS_SYNC_NOT_ADAPTIVE = 0x2000,
};
// IPC manager collection for lv2 objects of type T
template<typename T>
class ipc_manager final
{
mutable shared_mutex m_mutex;
std::unordered_map<u64, std::weak_ptr<T>> m_map;
public:
// Add new object if specified ipc_key is not used
template<typename F>
auto add(u64 ipc_key, F&& provider) -> decltype(static_cast<std::shared_ptr<T>>(provider()))
{
std::lock_guard<shared_mutex> lock(m_mutex);
// Get object location
std::weak_ptr<T>& wptr = m_map[ipc_key];
if (wptr.expired())
{
// Call a function which must return the object
std::shared_ptr<T>&& result = provider();
wptr = result;
return result;
}
return{};
}
// Get existing object with specified ipc_key
std::shared_ptr<T> get(u64 ipc_key) const
{
reader_lock lock(m_mutex);
const auto found = m_map.find(ipc_key);
if (found != m_map.end())
{
return found->second.lock();
}
return{};
}
};
extern std::mutex& get_current_thread_mutex();
extern std::condition_variable& get_current_thread_cv();
// Simple class for global mutex to pass unique_lock and check it
struct lv2_lock_t

View file

@ -15,7 +15,7 @@ extern u64 get_system_time();
void lv2_timer_t::on_task()
{
std::unique_lock<std::mutex> lock(mutex);
std::unique_lock<std::mutex> lock(get_current_thread_mutex());
while (state <= SYS_TIMER_STATE_RUN)
{
@ -23,11 +23,9 @@ void lv2_timer_t::on_task()
if (state == SYS_TIMER_STATE_RUN)
{
if (lock) lock.unlock();
LV2_LOCK;
if (get_system_time() >= expire)
while (get_system_time() >= expire)
{
const auto queue = port.lock();
@ -45,20 +43,31 @@ void lv2_timer_t::on_task()
else
{
state = SYS_TIMER_STATE_STOP; // stop if oneshot or the event port was disconnected (TODO: is it correct?)
break;
}
}
}
if (!lock)
{
lock.lock();
continue;
}
cv.wait_for(lock, std::chrono::milliseconds(1));
get_current_thread_cv().wait_for(lock, std::chrono::milliseconds(1));
}
}
std::string lv2_timer_t::get_name() const
{
return fmt::format("Timer Thread[0x%x]", id);
}
void lv2_timer_t::on_stop()
{
// Signal thread using invalid state and join
state = -1;
lock_notify();
named_thread::on_stop();
}
s32 sys_timer_create(vm::ptr<u32> timer_id)
{
sys_timer.warning("sys_timer_create(timer_id=*0x%x)", timer_id);
@ -157,15 +166,11 @@ s32 _sys_timer_start(u32 timer_id, u64 base_time, u64 period)
}
// sys_timer_start_periodic() will use current time (TODO: is it correct?)
// lock for reliable notification
std::lock_guard<std::mutex> lock(timer->mutex);
timer->expire = base_time ? base_time : start_time + period;
timer->period = period;
timer->state = SYS_TIMER_STATE_RUN;
timer->cv.notify_one();
timer->lock_notify();
return CELL_OK;
}

View file

@ -24,19 +24,9 @@ class lv2_timer_t final : public named_thread
void on_task() override;
public:
std::string get_name() const override
{
return fmt::format("Timer Thread[0x%x]", id);
}
std::string get_name() const override;
void on_stop() override
{
// Signal thread using invalid state and join
std::lock_guard<std::mutex>{ mutex }, state = -1;
cv.notify_one();
join();
}
void on_stop() override;
const u32 id{}; // Timer id

View file

@ -225,7 +225,7 @@ class idm
template<typename T, typename F, typename = std::result_of_t<F()>>
static map_type::pointer create_id(F&& provider)
{
std::lock_guard<shared_mutex> lock(g_mutex);
writer_lock lock(g_mutex);
if (auto place = allocate_id(get_tag<T>(), id_manager::id_traits<T>::min, id_manager::id_traits<T>::max))
{
@ -255,7 +255,7 @@ class idm
// Remove ID and return object
static std::shared_ptr<void> delete_id(u32 type, u32 tag, u32 id)
{
std::lock_guard<shared_mutex> lock(g_mutex);
writer_lock lock(g_mutex);
auto&& ptr = deallocate_id(tag, id);
@ -471,7 +471,7 @@ class fxm
static std::shared_ptr<void> remove(u32 type)
{
std::lock_guard<shared_mutex> lock(g_mutex);
writer_lock lock(g_mutex);
return std::move(g_map[type]);
}
@ -504,7 +504,7 @@ public:
{
std::shared_ptr<T> ptr;
{
std::lock_guard<shared_mutex> lock(g_mutex);
writer_lock lock(g_mutex);
if (!g_map[get_type<T>()])
{
@ -529,7 +529,7 @@ public:
std::shared_ptr<T> ptr;
std::shared_ptr<void> old;
{
std::lock_guard<shared_mutex> lock(g_mutex);
writer_lock lock(g_mutex);
old = std::move(g_map[get_type<T>()]);
ptr = std::make_shared<Make>(std::forward<Args>(args)...);
@ -552,7 +552,7 @@ public:
{
std::shared_ptr<T> ptr;
{
std::lock_guard<shared_mutex> lock(g_mutex);
writer_lock lock(g_mutex);
if (!g_map[get_type<T>()])
{
@ -577,7 +577,7 @@ public:
std::shared_ptr<T> ptr;
std::shared_ptr<void> old;
{
std::lock_guard<shared_mutex> lock(g_mutex);
writer_lock lock(g_mutex);
old = std::move(g_map[get_type<T>()]);
ptr = provider();
@ -600,7 +600,7 @@ public:
{
std::shared_ptr<T> ptr;
{
std::lock_guard<shared_mutex> lock(g_mutex);
writer_lock lock(g_mutex);
if (auto& value = g_map[get_type<T>()])
{

View file

@ -22,6 +22,8 @@
#endif
#endif
#include "wait_engine.h"
namespace vm
{
thread_local u64 g_tls_fault_count{};
@ -159,160 +161,7 @@ namespace vm
reservation_mutex_t g_reservation_mutex;
std::array<waiter_t, 1024> g_waiter_list;
std::size_t g_waiter_max = 0; // min unused position
std::size_t g_waiter_nil = 0; // min search position
std::mutex g_waiter_list_mutex;
waiter_t* _add_waiter(named_thread& thread, u32 addr, u32 size)
{
std::lock_guard<std::mutex> lock(g_waiter_list_mutex);
const u64 align = 0x80000000ull >> cntlz32(size);
if (!size || !addr || size > 4096 || size != align || addr & (align - 1))
{
throw EXCEPTION("Invalid arguments (addr=0x%x, size=0x%x)", addr, size);
}
thread.mutex.lock();
// look for empty position
for (; g_waiter_nil < g_waiter_max; g_waiter_nil++)
{
waiter_t& waiter = g_waiter_list[g_waiter_nil];
if (!waiter.thread)
{
// store next position for further addition
g_waiter_nil++;
return waiter.reset(addr, size, thread);
}
}
if (g_waiter_max >= g_waiter_list.size())
{
throw EXCEPTION("Waiter list limit broken (%lld)", g_waiter_max);
}
waiter_t& waiter = g_waiter_list[g_waiter_max++];
g_waiter_nil = g_waiter_max;
return waiter.reset(addr, size, thread);
}
void _remove_waiter(waiter_t* waiter)
{
std::lock_guard<std::mutex> lock(g_waiter_list_mutex);
// mark as deleted
waiter->thread = nullptr;
// amortize adding new element
g_waiter_nil = std::min<std::size_t>(g_waiter_nil, waiter - g_waiter_list.data());
// amortize polling
while (g_waiter_max && !g_waiter_list[g_waiter_max - 1].thread)
{
g_waiter_max--;
}
}
bool waiter_t::try_notify()
{
std::lock_guard<std::mutex> lock(thread->mutex);
try
{
// test predicate
if (!pred || !pred())
{
return false;
}
// clear predicate
pred = nullptr;
}
catch (...)
{
// capture any exception possibly thrown by predicate
pred = [exception = std::current_exception()]() -> bool
{
// new predicate will throw the captured exception from the original thread
std::rethrow_exception(exception);
};
}
// set addr and mask to invalid values to prevent further polling
addr = 0;
mask = ~0;
// signal thread
thread->cv.notify_one();
return true;
}
waiter_lock_t::waiter_lock_t(named_thread& thread, u32 addr, u32 size)
: m_waiter(_add_waiter(thread, addr, size))
, m_lock(thread.mutex, std::adopt_lock) // must be locked in _add_waiter
{
}
void waiter_lock_t::wait()
{
// if another thread successfully called pred(), it must be set to null
while (m_waiter->pred)
{
// if pred() called by another thread threw an exception, it'll be rethrown
if (m_waiter->pred())
{
return;
}
CHECK_EMU_STATUS;
m_waiter->thread->cv.wait(m_lock);
}
}
waiter_lock_t::~waiter_lock_t()
{
// reset some data to avoid excessive signaling
m_waiter->addr = 0;
m_waiter->mask = ~0;
m_waiter->pred = nullptr;
// unlock thread's mutex to avoid deadlock with g_waiter_list_mutex
m_lock.unlock();
_remove_waiter(m_waiter);
}
void _notify_at(u32 addr, u32 size)
{
// skip notification if no waiters available
if (_mm_mfence(), !g_waiter_max) return;
std::lock_guard<std::mutex> lock(g_waiter_list_mutex);
const u32 mask = ~(size - 1);
for (std::size_t i = 0; i < g_waiter_max; i++)
{
waiter_t& waiter = g_waiter_list[i];
// check address range overlapping using masks generated from size (power of 2)
if (waiter.thread && ((waiter.addr ^ addr) & (mask & waiter.mask)) == 0)
{
waiter.try_notify();
}
}
}
access_violation::access_violation(u64 addr, const char* cause)
: std::runtime_error(fmt::exception("Access violation %s address 0x%llx", cause, addr))
@ -320,62 +169,6 @@ namespace vm
g_tls_fault_count &= ~(1ull << 63);
}
void notify_at(u32 addr, u32 size)
{
const u64 align = 0x80000000ull >> cntlz32(size);
if (!size || !addr || size > 4096 || size != align || addr & (align - 1))
{
throw EXCEPTION("Invalid arguments (addr=0x%x, size=0x%x)", addr, size);
}
_notify_at(addr, size);
}
bool notify_all()
{
std::unique_lock<std::mutex> lock(g_waiter_list_mutex);
std::size_t waiters = 0;
std::size_t signaled = 0;
for (std::size_t i = 0; i < g_waiter_max; i++)
{
waiter_t& waiter = g_waiter_list[i];
if (waiter.thread && waiter.addr)
{
waiters++;
if (waiter.try_notify())
{
signaled++;
}
}
}
// return true if waiter list is empty or all available waiters were signaled
return waiters == signaled;
}
void start()
{
// start notification thread
thread_ctrl::spawn("vm::start thread", []()
{
while (!Emu.IsStopped())
{
// poll waiters periodically (TODO)
while (!notify_all() && !Emu.IsPaused())
{
std::this_thread::yield();
}
std::this_thread::sleep_for(std::chrono::milliseconds(1));
}
});
}
void _reservation_set(u32 addr, bool no_access = false)
{
#ifdef _WIN32
@ -422,7 +215,7 @@ namespace vm
if ((g_tls_did_break_reservation = _reservation_break(addr)))
{
lock.unlock(), _notify_at(raddr, rsize);
lock.unlock(), vm::notify_at(raddr, rsize);
}
}
@ -489,7 +282,7 @@ namespace vm
_reservation_break(addr);
// notify waiter
lock.unlock(), _notify_at(addr, size);
lock.unlock(), vm::notify_at(addr, size);
// atomic update succeeded
return true;
@ -517,7 +310,7 @@ namespace vm
// break the reservation if overlap
if ((g_tls_did_break_reservation = _reservation_break(addr)))
{
lock.unlock(), _notify_at(raddr, rsize);
lock.unlock(), vm::notify_at(raddr, rsize);
}
}
@ -536,11 +329,13 @@ namespace vm
void reservation_free()
{
if (reservation_test())
auto thread = thread_ctrl::get_current();
if (reservation_test(thread))
{
std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
if (g_reservation_owner && g_reservation_owner == thread_ctrl::get_current())
if (g_reservation_owner && g_reservation_owner == thread)
{
g_tls_did_break_reservation = _reservation_break(g_reservation_addr);
}
@ -589,7 +384,7 @@ namespace vm
_reservation_break(addr);
// notify waiter
lock.unlock(), _notify_at(addr, size);
lock.unlock(), vm::notify_at(addr, size);
}
void _page_map(u32 addr, u32 size, u8 flags)
@ -1014,6 +809,8 @@ namespace vm
return nullptr;
}
extern void start();
namespace ps3
{
void init()

View file

@ -1,8 +1,9 @@
#pragma once
#include "Utilities/Thread.h"
#include <map>
class thread_ctrl;
namespace vm
{
extern u8* const g_base_addr;
@ -38,71 +39,6 @@ namespace vm
[[noreturn]] void throw_access_violation(u64 addr, const char* cause);
struct waiter_t
{
u32 addr = 0;
u32 mask = ~0;
named_thread* thread = nullptr;
std::function<bool()> pred;
waiter_t() = default;
waiter_t* reset(u32 addr, u32 size, named_thread& thread)
{
this->addr = addr;
this->mask = ~(size - 1);
this->thread = &thread;
// must be null at this point
Ensures(!pred);
return this;
}
bool try_notify();
};
class waiter_lock_t
{
waiter_t* m_waiter;
std::unique_lock<std::mutex> m_lock;
public:
waiter_lock_t(named_thread& thread, u32 addr, u32 size);
waiter_t* operator ->() const
{
return m_waiter;
}
void wait();
~waiter_lock_t();
};
// Wait until pred() returns true, addr must be aligned to size which must be a power of 2, pred() may be called by any thread
template<typename F, typename... Args>
auto wait_op(named_thread& thread, u32 addr, u32 size, F pred, Args&&... args) -> decltype(static_cast<void>(pred(args...)))
{
// return immediately if condition passed (optimistic case)
if (pred(args...)) return;
// initialize waiter and locker
waiter_lock_t lock(thread, addr, size);
// initialize predicate
lock->pred = WRAP_EXPR(pred(args...));
// start waiting
lock.wait();
}
// Notify waiters on specific addr, addr must be aligned to size which must be a power of 2
void notify_at(u32 addr, u32 size);
// Try to poll each waiter's condition (false if try_lock failed)
bool notify_all();
// This flag is changed by various reservation functions and may have different meaning.
// reservation_break() - true if the reservation was successfully broken.
// reservation_acquire() - true if another existing reservation was broken.
@ -124,7 +60,7 @@ namespace vm
bool reservation_query(u32 addr, u32 size, bool is_writing, std::function<bool()> callback);
// Returns true if the current thread owns reservation
bool reservation_test(const thread_ctrl* current = thread_ctrl::get_current());
bool reservation_test(const thread_ctrl* current);
// Break all reservations created by the current thread
void reservation_free();
@ -406,39 +342,6 @@ namespace vm
u32 stack_push(u32 size, u32 align_v);
void stack_pop_verbose(u32 addr, u32 size) noexcept;
class stack
{
u32 m_begin;
u32 m_size;
int m_page_size;
int m_position;
u8 m_align;
public:
void init(u32 begin, u32 size, u32 page_size = 180, u8 align = 0x10)
{
m_begin = begin;
m_size = size;
m_page_size = page_size;
m_position = 0;
m_align = align;
}
u32 alloc_new_page()
{
Expects(m_position + m_page_size < (int)m_size);
m_position += (int)m_page_size;
return m_begin + m_position;
}
u32 dealloc_new_page()
{
Expects(m_position - m_page_size > 0);
m_position -= (int)m_page_size;
return m_begin + m_position;
}
};
extern thread_local u64 g_tls_fault_count;
}

View file

@ -437,20 +437,6 @@ namespace vm
// Null pointer convertible to any vm::ptr* type
static null_t null;
// Call wait_op() for specified vm pointer
template<typename T, typename AT, typename F, typename... Args>
static inline auto wait_op(named_thread& thread, const _ptr_base<T, AT>& ptr, F pred, Args&&... args) -> decltype(static_cast<void>(pred(args...)))
{
return wait_op(thread, ptr.addr(), SIZE_32(T), std::move(pred), std::forward<Args>(args)...);
}
// Call notify_at() for specified vm pointer
template<typename T, typename AT>
inline void notify_at(const vm::_ptr_base<T, AT>& ptr)
{
return notify_at(ptr.addr(), SIZE_32(T));
}
}
template<typename T1, typename AT1, typename T2, typename AT2>
@ -496,17 +482,12 @@ struct to_se<vm::_ptr_base<T, AT>, Se>
using type = vm::_ptr_base<T, typename to_se<AT, Se>::type>;
};
namespace fmt
// Format pointer
template<typename T, typename AT>
struct unveil<vm::_ptr_base<T, AT>, void>
{
// Format pointer
template<typename T, typename AT>
struct unveil<vm::_ptr_base<T, AT>, void>
static inline auto get(const vm::_ptr_base<T, AT>& arg)
{
using result_type = typename unveil<AT>::result_type;
static inline result_type get_value(const vm::_ptr_base<T, AT>& arg)
{
return unveil<AT>::get_value(arg.addr());
}
};
}
return unveil<AT>::get(arg.addr());
}
};

View file

@ -192,12 +192,9 @@ struct to_se<vm::_ref_base<T, AT>, Se>
using type = vm::_ref_base<T, typename to_se<AT, Se>::type>;
};
namespace fmt
// Forbid formatting
template<typename T, typename AT>
struct unveil<vm::_ref_base<T, AT>, void>
{
// Forbid formatting
template<typename T, typename AT>
struct unveil<vm::_ref_base<T, AT>, void>
{
static_assert(!sizeof(T), "vm::_ref_base<>: ambiguous format argument");
};
}
static_assert(!sizeof(T), "vm::_ref_base<>: ambiguous format argument");
};

View file

@ -0,0 +1,158 @@
#include "stdafx.h"
#include "Emu/System.h"
#include "vm.h"
#include "wait_engine.h"
#include "Utilities/Thread.h"
#include "Utilities/SharedMutex.h"
extern std::condition_variable& get_current_thread_cv();
extern std::mutex& get_current_thread_mutex();
namespace vm
{
static shared_mutex s_mutex;
static std::unordered_set<waiter*> s_waiters(256);
bool waiter::try_notify()
{
{
std::lock_guard<mutex_t> lock(*mutex);
try
{
// Test predicate
if (!pred || !pred())
{
return false;
}
// Clear predicate
pred = nullptr;
}
catch (...)
{
// Capture any exception possibly thrown by predicate
pred = [exception = std::current_exception()]() -> bool
{
// New predicate will throw the captured exception from the original thread
std::rethrow_exception(exception);
};
}
// Set addr and mask to invalid values to prevent further polling
addr = 0;
mask = ~0;
}
// Signal thread
cond->notify_one();
return true;
}
waiter::~waiter()
{
}
waiter_lock::waiter_lock(u32 addr, u32 size)
: m_lock(get_current_thread_mutex(), std::defer_lock)
{
Expects(addr && (size & (~size + 1)) == size && (addr & (size - 1)) == 0);
m_waiter.mutex = m_lock.mutex();
m_waiter.cond = &get_current_thread_cv();
m_waiter.addr = addr;
m_waiter.mask = ~(size - 1);
{
writer_lock lock(s_mutex);
s_waiters.emplace(&m_waiter);
}
m_lock.lock();
}
void waiter_lock::wait()
{
// If another thread successfully called pred(), it must be set to null
while (m_waiter.pred)
{
// If pred() called by another thread threw an exception, it'll be rethrown
if (m_waiter.pred())
{
return;
}
CHECK_EMU_STATUS;
m_waiter.cond->wait(m_lock);
}
}
waiter_lock::~waiter_lock()
{
if (m_lock) m_lock.unlock();
writer_lock lock(s_mutex);
s_waiters.erase(&m_waiter);
}
void notify_at(u32 addr, u32 size)
{
reader_lock lock(s_mutex);
for (const auto _w : s_waiters)
{
// Check address range overlapping using masks generated from size (power of 2)
if (((_w->addr ^ addr) & (_w->mask & ~(size - 1))) == 0)
{
_w->try_notify();
}
}
}
static bool notify_all()
{
reader_lock lock(s_mutex);
std::size_t waiters = 0;
std::size_t signaled = 0;
for (const auto _w : s_waiters)
{
if (_w->addr)
{
waiters++;
if (_w->try_notify())
{
signaled++;
}
}
}
// return true if waiter list is empty or all available waiters were signaled
return waiters == signaled;
}
void start()
{
// start notification thread
thread_ctrl::spawn("vm::start thread", []()
{
while (!Emu.IsStopped())
{
// poll waiters periodically (TODO)
while (!notify_all() && !Emu.IsPaused())
{
std::this_thread::yield();
}
std::this_thread::sleep_for(std::chrono::milliseconds(1));
}
});
}
}
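The overlap test in notify_at() above works because both the waiter's range and the notified range are power-of-two sized and aligned, so the two ranges are either nested or disjoint and the comparison reduces to an XOR under a combined mask. A standalone sketch of the arithmetic (not code from the emulator, addresses are made up):

#include <cstdint>
#include <cassert>

// True when [addr, addr + size) overlaps [w_addr, w_addr + w_size),
// assuming both addresses are aligned to their (power-of-two) sizes.
static bool ranges_overlap(std::uint32_t w_addr, std::uint32_t w_size,
                           std::uint32_t addr, std::uint32_t size)
{
    const std::uint32_t w_mask = ~(w_size - 1); // mask stored in the waiter
    return ((w_addr ^ addr) & (w_mask & ~(size - 1))) == 0;
}

int main()
{
    // A waiter on 128 bytes at 0x10080 is hit by a 4-byte write at 0x100A0...
    assert(ranges_overlap(0x10080, 128, 0x100A0, 4));
    // ...but not by a write below the reserved range.
    assert(!ranges_overlap(0x10080, 128, 0x10000, 4));
}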

View file

@ -0,0 +1,63 @@
#pragma once
#include <mutex>
#include <condition_variable>
#include <functional>
class named_thread;
namespace vm
{
using mutex_t = std::mutex;
using cond_t = std::condition_variable;
struct waiter
{
u32 addr;
u32 mask;
mutex_t* mutex;
cond_t* cond;
std::function<bool()> pred;
~waiter();
bool try_notify();
};
class waiter_lock
{
waiter m_waiter;
std::unique_lock<mutex_t> m_lock;
public:
waiter_lock(u32 addr, u32 size);
waiter* operator ->()
{
return &m_waiter;
}
void wait();
~waiter_lock();
};
// Wait until pred() returns true. addr must be aligned to size, which must be a power of 2; pred() may be called by any thread
template<typename F, typename... Args>
auto wait_op(u32 addr, u32 size, F&& pred, Args&&... args) -> decltype(static_cast<void>(pred(args...)))
{
// Return immediately if condition passed (optimistic case)
if (pred(args...)) return;
waiter_lock lock(addr, size);
// Initialize predicate
lock->pred = WRAP_EXPR(pred(args...));
lock.wait();
}
	// Notify waiters on a specific addr; addr must be aligned to size, which must be a power of 2
void notify_at(u32 addr, u32 size);
}
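The new header pairs wait_op() with notify_at() as a condition-variable-like mechanism keyed by guest address. A minimal usage sketch under stated assumptions: flag_addr and read_flag() are placeholders for illustration only, not names from this diff.

// Sketch only: 'flag_addr' is a hypothetical 4-byte-aligned guest address and
// 'read_flag' stands in for whatever accessor the caller already has.
constexpr u32 flag_addr = 0x10010000;

// Waiting side: block until the flag becomes non-zero. wait_op() registers a
// waiter keyed by (addr, mask) and re-evaluates the predicate whenever
// notify_at() hits an overlapping range (possibly from another thread).
vm::wait_op(flag_addr, 4, [&]
{
    return read_flag(flag_addr) != 0;
});

// Writing side: after storing the new value, wake anyone waiting on it.
vm::notify_at(flag_addr, 4);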

View file

@ -1041,33 +1041,25 @@ enum Method
namespace rsx
{
template<typename ...T>
static auto make_command(u32 start_register, T... values) -> std::array<u32, sizeof...(values) + 1>
template<typename AT>
static inline u32 make_command(vm::_ptr_base<be_t<u32>, AT>& dst, u32 start_register, std::initializer_list<any32> values)
{
return{ (start_register << 2) | u32(sizeof...(values) << 18), u32(values)... };
}
*dst++ = start_register << 2 | static_cast<u32>(values.size()) << 18;
static u32 make_jump(u32 offset)
{
return CELL_GCM_METHOD_FLAG_JUMP | offset;
}
template<typename AT, typename ...T>
static size_t make_command(vm::ps3::ptr<u32, AT> &dst, u32 start_register, T... values)
{
for (u32 command : { (start_register << 2) | u32(sizeof...(values) << 18), static_cast<u32>(values)... })
for (const any32& cmd : values)
{
*dst++ = command;
*dst++ = cmd.as<u32>();
}
return sizeof...(values) + 1;
return SIZE_32(u32) * (static_cast<u32>(values.size()) + 1);
}
template<typename AT>
static size_t make_jump(vm::ps3::ptr<u32, AT> &dst, u32 offset)
static inline u32 make_jump(vm::_ptr_base<be_t<u32>, AT>& dst, u32 offset)
{
*dst++ = make_jump(offset);
return 1;
*dst++ = CELL_GCM_METHOD_FLAG_JUMP | offset;
return SIZE_32(u32);
}
std::string get_method_name(const u32 id);
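The reworked helpers now write directly into a vm pointer and take an initializer list of any32, so integer and float method arguments can be mixed in one call; the return value is the number of bytes emitted. A hedged sketch, assuming any32 implicitly accepts any 32-bit value (as its use with .as<u32>() above suggests); 'cmd', NV4097_SET_SOMETHING and 'jump_offset' are placeholders, not names taken from this diff:

// 'cmd' is assumed to be a vm pointer into the FIFO in big-endian u32 units.
u32 written = 0;
written += rsx::make_command(cmd, NV4097_SET_SOMETHING,
{
    u32{ 0x1 }, // integer argument
    1.0f,       // float argument, stored bit-exactly via any32
});
written += rsx::make_jump(cmd, jump_offset); // CELL_GCM_METHOD_FLAG_JUMP | offset
// 'written' now holds the total number of bytes appended behind 'cmd'.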

View file

@ -665,7 +665,7 @@ void GLGSRender::flip(int buffer)
if (buffer_region.tile)
{
std::unique_ptr<u8> temp(new u8[buffer_height * buffer_pitch]);
std::unique_ptr<u8[]> temp(new u8[buffer_height * buffer_pitch]);
buffer_region.read(temp.get(), buffer_width, buffer_height, buffer_pitch);
__glcheck m_flip_tex_color.copy_from(temp.get(), gl::texture::format::bgra, gl::texture::type::uint_8_8_8_8);
}
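The one-character change above is a correctness fix: std::unique_ptr<u8> releases its buffer with delete, while memory obtained from new u8[n] must be released with delete[], which only the array specialization does. A minimal illustration (sizes are placeholders; make_unique assumes a C++14 toolchain):

#include <memory>
using u8 = unsigned char;

const std::size_t height = 720, pitch = 2880; // placeholder dimensions

// Array specialization: the deleter calls delete[], matching new u8[n].
std::unique_ptr<u8[]> temp(new u8[height * pitch]);

// Equivalent and zero-initializing, if std::make_unique is available:
auto temp2 = std::make_unique<u8[]>(height * pitch);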

View file

@ -560,43 +560,44 @@ namespace rsx
}
else
{
std::lock_guard<std::mutex> lock{ m_mtx_task };
Expects(0);
//std::lock_guard<std::mutex> lock{ m_mtx_task };
internal_task_entry &front = m_internal_tasks.front();
//internal_task_entry &front = m_internal_tasks.front();
if (front.callback())
{
front.promise.set_value();
m_internal_tasks.pop_front();
}
//if (front.callback())
//{
// front.promise.set_value();
// m_internal_tasks.pop_front();
//}
}
}
std::future<void> thread::add_internal_task(std::function<bool()> callback)
{
std::lock_guard<std::mutex> lock{ m_mtx_task };
m_internal_tasks.emplace_back(callback);
//std::future<void> thread::add_internal_task(std::function<bool()> callback)
//{
// std::lock_guard<std::mutex> lock{ m_mtx_task };
// m_internal_tasks.emplace_back(callback);
return m_internal_tasks.back().promise.get_future();
}
// return m_internal_tasks.back().promise.get_future();
//}
void thread::invoke(std::function<bool()> callback)
{
if (get_thread_ctrl() == thread_ctrl::get_current())
{
while (true)
{
if (callback())
{
break;
}
}
}
else
{
add_internal_task(callback).wait();
}
}
//void thread::invoke(std::function<bool()> callback)
//{
// if (operator->() == thread_ctrl::get_current())
// {
// while (true)
// {
// if (callback())
// {
// break;
// }
// }
// }
// else
// {
// add_internal_task(callback).wait();
// }
//}
namespace
{

View file

@ -3,7 +3,6 @@
#include <stack>
#include <deque>
#include <set>
#include <future>
#include "GCM.h"
#include "RSXTexture.h"
#include "RSXVertexProgram.h"
@ -320,7 +319,7 @@ namespace rsx
struct internal_task_entry
{
std::function<bool()> callback;
std::promise<void> promise;
//std::promise<void> promise;
internal_task_entry(std::function<bool()> callback) : callback(callback)
{
@ -331,8 +330,8 @@ namespace rsx
void do_internal_task();
public:
std::future<void> add_internal_task(std::function<bool()> callback);
void invoke(std::function<bool()> callback);
//std::future<void> add_internal_task(std::function<bool()> callback);
//void invoke(std::function<bool()> callback);
/**
* Fill buffer with 4x4 scale offset matrix.

View file

@ -451,7 +451,7 @@ namespace rsx
// method_registers[NV3089_IMAGE_IN_SIZE], in_pitch, src_offset, double(1 << 20) / (method_registers[NV3089_DS_DX]), double(1 << 20) / (method_registers[NV3089_DT_DY]),
// method_registers[NV3089_CLIP_SIZE], method_registers[NV3089_IMAGE_OUT_SIZE]);
std::unique_ptr<u8[]> temp1, temp2;
std::unique_ptr<u8[]> temp1, temp2, sw_temp;
AVPixelFormat in_format = src_color_format == CELL_GCM_TRANSFER_SCALE_FORMAT_R5G6B5 ? AV_PIX_FMT_RGB565BE : AV_PIX_FMT_ARGB;
AVPixelFormat out_format = dst_color_format == CELL_GCM_TRANSFER_SURFACE_FORMAT_R5G6B5 ? AV_PIX_FMT_RGB565BE : AV_PIX_FMT_ARGB;
@ -573,7 +573,7 @@ namespace rsx
// Check and pad texture out if we are given non square texture for swizzle to be correct
if (sw_width != out_w || sw_height != out_h)
{
std::unique_ptr<u8[]> sw_temp(new u8[out_bpp * sw_width * sw_height]);
sw_temp.reset(new u8[out_bpp * sw_width * sw_height]);
switch (out_bpp)
{

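Hoisting sw_temp out of the inner block suggests a lifetime fix: a block-local unique_ptr would free the padded swizzle buffer at the end of the if-block, while the data appears to be consumed afterwards. The pattern in isolation, with purely illustrative names (pad, swizzle, input, dst and the size are placeholders, not code from this diff):

std::unique_ptr<u8[]> sw_temp;            // declared in the enclosing scope
const u8* src = input;

if (needs_padding)
{
    sw_temp.reset(new u8[padded_size]);   // buffer now outlives the if-block
    pad(sw_temp.get(), input, padded_size);
    src = sw_temp.get();
}

swizzle(dst, src);                        // safe: sw_temp still owns the memory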
View file

@ -178,9 +178,10 @@ void Emulator::Load()
}
// Load PARAM.SFO
m_psf = psf::load_object(fs::file(elf_dir + "/../PARAM.SFO"));
m_title = psf::get_string(m_psf, "TITLE", m_path);
m_title_id = psf::get_string(m_psf, "TITLE_ID");
const auto _psf = psf::load_object(fs::file(elf_dir + "/../PARAM.SFO"));
m_title = psf::get_string(_psf, "TITLE", m_path);
m_title_id = psf::get_string(_psf, "TITLE_ID");
LOG_NOTICE(LOADER, "Title: %s", GetTitle());
LOG_NOTICE(LOADER, "Serial: %s", GetTitleID());
LOG_NOTICE(LOADER, "");
@ -287,7 +288,7 @@ void Emulator::Run()
for (auto& thread : get_all_cpu_threads())
{
thread->state -= cpu_state::stop;
thread->safe_notify();
thread->lock_notify();
}
SendDbgCommand(DID_STARTED_EMU);
@ -350,7 +351,7 @@ void Emulator::Resume()
for (auto& thread : get_all_cpu_threads())
{
thread->state -= cpu_state::dbg_global_pause;
thread->safe_notify();
thread->lock_notify();
}
rpcs3::on_resume()();
@ -376,7 +377,7 @@ void Emulator::Stop()
for (auto& thread : get_all_cpu_threads())
{
thread->state += cpu_state::dbg_global_stop;
thread->safe_notify();
thread->lock_notify();
}
}

View file

@ -2,7 +2,6 @@
#include "VFS.h"
#include "DbgCommand.h"
#include "Loader/PSF.h"
enum class frame_type;
@ -73,7 +72,6 @@ class Emulator final
std::string m_elf_path;
std::string m_title_id;
std::string m_title;
psf::registry m_psf;
public:
Emulator();
@ -125,11 +123,6 @@ public:
return m_title;
}
const psf::registry& GetPSF() const
{
return m_psf;
}
u64 GetPauseTime()
{
return m_pause_amend_time;

View file

@ -437,7 +437,7 @@ void InterpreterDisAsmFrame::DoRun(wxCommandEvent& WXUNUSED(event))
if (cpu && cpu->state.test(cpu_state_pause))
{
cpu->state -= cpu_state::dbg_pause;
cpu->safe_notify();
cpu->lock_notify();
}
}
@ -459,7 +459,7 @@ void InterpreterDisAsmFrame::DoStep(wxCommandEvent& WXUNUSED(event))
return state.test_and_reset(cpu_state::dbg_pause);
}))
{
cpu->safe_notify();
cpu->lock_notify();
}
}
}

View file

@ -19,7 +19,7 @@
#include "Gui/CgDisasm.h"
#include "Crypto/unpkg.h"
#include <future>
#include "Utilities/Thread.h"
#ifndef _WIN32
#include "frame_icon.xpm"
@ -263,30 +263,39 @@ void MainFrame::InstallPkg(wxCommandEvent& WXUNUSED(event))
}
}
wxProgressDialog pdlg("PKG Decrypter / Installer", "Please wait, unpacking...", 1000, this, wxPD_AUTO_HIDE | wxPD_APP_MODAL);
wxProgressDialog pdlg("PKG Installer", "Please wait, unpacking...", 1000, this, wxPD_AUTO_HIDE | wxPD_APP_MODAL | wxPD_CAN_ABORT);
volatile f64 progress = 0.0;
// Run PKG unpacking asynchronously
auto result = std::async(std::launch::async, WRAP_EXPR(pkg_install(pkg_f, local_path + '/', progress)));
// Wait for the completion
while (result.wait_for(15ms) != std::future_status::ready)
// Synchronization variable
atomic_t<double> progress(0.);
{
// Update progress window
pdlg.Update(progress * pdlg.GetRange());
// Run PKG unpacking asynchronously
scope_thread worker("PKG Installer", [&]
{
if (pkg_install(pkg_f, local_path + '/', progress))
{
progress = 1.;
}
// Update main frame
Update();
wxGetApp().ProcessPendingEvents();
// TODO: Ask user to delete files on cancellation/failure?
});
// Wait for the completion
while (std::this_thread::sleep_for(5ms), progress < 1.)
{
// Update progress window
if (!pdlg.Update(static_cast<int>(progress * pdlg.GetRange())))
{
// Installation cancelled (signal with negative value)
progress -= 1.;
break;
}
}
}
pdlg.Close();
if (result.get())
if (progress >= 1.)
{
LOG_SUCCESS(LOADER, "PKG: Package successfully installed in %s", local_path);
// Refresh game list
m_game_viewer->Refresh();
}
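The new installer flow drops std::future in favour of a single atomic progress value shared between the worker thread and the GUI loop: completion is signalled by reaching 1.0 and cancellation by pushing the value below zero. A standalone sketch of the same pattern using only the standard library (std::thread and std::atomic stand in for the emulator's scope_thread and atomic_t, and store(-1.0) replaces the in-tree 'progress -= 1.'):

#include <atomic>
#include <chrono>
#include <thread>
#include <cstdio>

int main()
{
    std::atomic<double> progress{0.0};

    // Worker: advances progress, stops early if it goes negative (cancelled).
    std::thread worker([&]
    {
        for (int i = 0; i < 100 && progress.load() >= 0.0; i++)
        {
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
            progress.store((i + 1) / 100.0);
        }
    });

    // "GUI" loop: polls progress; a cancel button would store a negative value.
    while (progress.load() >= 0.0 && progress.load() < 1.0)
    {
        std::this_thread::sleep_for(std::chrono::milliseconds(5));
        std::printf("\r%3d%%", static_cast<int>(progress.load() * 100));
        // progress.store(-1.0); // simulate pressing Cancel
    }

    worker.join();
    std::printf("\n%s\n", progress.load() >= 1.0 ? "done" : "cancelled");
}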

View file

@ -75,6 +75,9 @@
<ClCompile Include="..\Utilities\Config.cpp" />
<ClCompile Include="..\Utilities\rXml.cpp" />
<ClCompile Include="..\Utilities\Semaphore.cpp" />
<ClCompile Include="..\Utilities\SharedMutex.cpp">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="..\Utilities\StrFmt.cpp">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
@ -224,6 +227,7 @@
<ClCompile Include="Emu\Cell\SPUASMJITRecompiler.cpp" />
<ClCompile Include="Emu\Cell\SPUDisAsm.cpp" />
<ClCompile Include="Emu\Cell\SPUInterpreter.cpp" />
<ClCompile Include="Emu\Memory\wait_engine.cpp" />
<ClCompile Include="Emu\RSX\CgBinaryFragmentProgram.cpp" />
<ClCompile Include="Emu\RSX\CgBinaryVertexProgram.cpp" />
<ClCompile Include="Emu\RSX\Common\BufferUtils.cpp" />
@ -349,6 +353,7 @@
<ItemGroup>
<ClInclude Include="..\3rdparty\stblib\stb_image.h" />
<ClInclude Include="..\Utilities\Atomic.h" />
<ClInclude Include="..\Utilities\AtomicPtr.h" />
<ClInclude Include="..\Utilities\AutoPause.h" />
<ClInclude Include="..\Utilities\BEType.h" />
<ClInclude Include="..\Utilities\BitField.h" />
@ -449,6 +454,7 @@
<ClInclude Include="Emu\Audio\Null\NullAudioThread.h" />
<ClInclude Include="Emu\Cell\Common.h" />
<ClInclude Include="Emu\Cell\ErrorCodes.h" />
<ClInclude Include="Emu\Cell\lv2\IPC.h" />
<ClInclude Include="Emu\Cell\lv2\sys_cond.h" />
<ClInclude Include="Emu\Cell\lv2\sys_dbg.h" />
<ClInclude Include="Emu\Cell\lv2\sys_event.h" />
@ -555,6 +561,7 @@
<ClInclude Include="Emu\CPU\CPUDisAsm.h" />
<ClInclude Include="Emu\CPU\CPUThread.h" />
<ClInclude Include="Emu\DbgCommand.h" />
<ClInclude Include="Emu\Memory\wait_engine.h" />
<ClInclude Include="Emu\VFS.h" />
<ClInclude Include="Emu\GameInfo.h" />
<ClInclude Include="Emu\IdManager.h" />

View file

@ -845,6 +845,12 @@
<ClCompile Include="Emu\VFS.cpp">
<Filter>Emu</Filter>
</ClCompile>
<ClCompile Include="..\Utilities\SharedMutex.cpp">
<Filter>Utilities</Filter>
</ClCompile>
<ClCompile Include="Emu\Memory\wait_engine.cpp">
<Filter>Emu\Memory</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="Crypto\aes.h">
@ -1591,5 +1597,14 @@
<ClInclude Include="..\Utilities\GSL.h">
<Filter>Utilities</Filter>
</ClInclude>
<ClInclude Include="..\Utilities\AtomicPtr.h">
<Filter>Utilities</Filter>
</ClInclude>
<ClInclude Include="Emu\Memory\wait_engine.h">
<Filter>Emu\Memory</Filter>
</ClInclude>
<ClInclude Include="Emu\Cell\lv2\IPC.h">
<Filter>Emu\Cell\lv2</Filter>
</ClInclude>
</ItemGroup>
</Project>

View file

@ -36,10 +36,10 @@
#ifdef _MSC_VER
#include "Emu/RSX/VK/VKGSRender.h"
#include "Emu/RSX/D3D12/D3D12GSRender.h"
#include "Emu/Audio/XAudio2/XAudio2Thread.h"
#endif
#ifdef _WIN32
#include "Emu/Audio/XAudio2/XAudio2Thread.h"
#include <wx/msw/wrapwin.h>
#endif
@ -105,7 +105,7 @@ cfg::map_entry<std::function<std::shared_ptr<AudioThread>()>> g_cfg_audio_render
{
{ "Null", PURE_EXPR(std::make_shared<NullAudioThread>()) },
{ "OpenAL", PURE_EXPR(std::make_shared<OpenALThread>()) },
#ifdef _MSC_VER
#ifdef _WIN32
{ "XAudio2", PURE_EXPR(std::make_shared<XAudio2Thread>()) },
#endif
});

View file

@ -18,6 +18,7 @@
<SDLCheck>false</SDLCheck>
<RuntimeTypeInfo>true</RuntimeTypeInfo>
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<AdditionalOptions>/Zc:throwingNew</AdditionalOptions>
</ClCompile>
<Link>
<AdditionalDependencies>ws2_32.lib;avcodec.lib;avformat.lib;avutil.lib;swresample.lib;swscale.lib</AdditionalDependencies>