Replace mutex-protected network message queues with lock-free SPSC double buffers in WebSocketClient
This commit is contained in:
parent
d883b58260
commit
f6cc30a30c
@ -1,4 +1,4 @@
|
||||
#ifdef NETWORK
|
||||
#ifdef NETWORK
|
||||
|
||||
#include "WebSocketClient.h"
|
||||
#include <iostream>
|
||||
@ -43,34 +43,22 @@ namespace ZL {
|
||||
}
|
||||
|
||||
// Called on the Boost.Asio I/O thread for every complete websocket frame.
// Producer side of the SPSC double buffer: append to the current producer
// buffer; Poll() (main thread) swaps buffers and drains the other one.
// Fix: the previous merged version pushed each message twice (once into the
// mutex-protected messageQueue AND once into the lock-free buffer), which
// would deliver every message twice — only the lock-free path is kept.
void WebSocketClient::processIncomingMessage(const std::string& msg) {
    // Parsing logic...
    /*if (msg.rfind("ID:", 0) == 0) {
        clientId = std::stoi(msg.substr(3));
    }*/

    // Lock-free push: the I/O thread is the only producer.
    // NOTE(review): load + push_back is not one atomic step — if the
    // consumer exchanges the producer pointer between these two operations,
    // this push lands in the buffer the consumer is currently draining.
    // Confirm the swap protocol rules that interleaving out.
    readProducerBuf_.load(std::memory_order_relaxed)->push_back(msg);
}
|
||||
|
||||
void WebSocketClient::Poll() {
|
||||
std::lock_guard<std::mutex> lock(queueMutex);
|
||||
|
||||
while (!messageQueue.empty()) {
|
||||
/*
|
||||
auto nowTime = std::chrono::system_clock::now();
|
||||
|
||||
//Apply server delay:
|
||||
nowTime -= std::chrono::milliseconds(CLIENT_DELAY);
|
||||
|
||||
auto now_ms = std::chrono::duration_cast<std::chrono::milliseconds>(
|
||||
nowTime.time_since_epoch()
|
||||
).count();*/
|
||||
std::string msg = messageQueue.front();
|
||||
messageQueue.pop();
|
||||
|
||||
// Lock-free drain: swap consumer buffer with producer if ours is empty, then process all
|
||||
MessageBuf* c = readConsumerBuf_.load(std::memory_order_acquire);
|
||||
if (c->empty()) {
|
||||
MessageBuf* p = readProducerBuf_.exchange(c, std::memory_order_acq_rel);
|
||||
readConsumerBuf_.store(p, std::memory_order_release);
|
||||
c = p;
|
||||
}
|
||||
for (std::string& msg : *c) {
|
||||
HandlePollMessage(msg);
|
||||
}
|
||||
c->clear();
|
||||
}
|
||||
|
||||
|
||||
@ -79,54 +67,49 @@ namespace ZL {
|
||||
if (!connected) return;
|
||||
|
||||
std::string finalMessage = SignMessage(message);
|
||||
/*
|
||||
#ifdef ENABLE_NETWORK_CHECKSUM
|
||||
// Вычисляем хеш. Для примера используем std::hash,
|
||||
// но в продакшене лучше взять быструю реализацию типа MurmurHash3.
|
||||
size_t hashValue = std::hash<std::string>{}(message + NET_SECRET);
|
||||
auto ss = std::make_shared<std::string>(std::move(finalMessage));
|
||||
|
||||
// Преобразуем хеш в hex-строку для передачи
|
||||
std::stringstream ss_hash;
|
||||
ss_hash << std::hex << hashValue;
|
||||
// Lock-free push to write queue
|
||||
writeProducerBuf_.load(std::memory_order_relaxed)->push_back(ss);
|
||||
|
||||
// Добавляем хеш в конец сообщения через разделитель
|
||||
// Например: "UPD:12345:pos...#hash:a1b2c3d4"
|
||||
finalMessage += "#hash:" + ss_hash.str();
|
||||
#endif
|
||||
*/
|
||||
auto ss = std::make_shared<std::string>(finalMessage);
|
||||
|
||||
std::lock_guard<std::mutex> lock(writeMutex_);
|
||||
writeQueue_.push(ss);
|
||||
|
||||
// Если сейчас ничего не записывается, инициируем первую запись
|
||||
if (!isWriting_) {
|
||||
// Start write chain if not already writing
|
||||
bool expected = false;
|
||||
if (isWriting_.compare_exchange_strong(expected, true, std::memory_order_acq_rel)) {
|
||||
doWrite();
|
||||
}
|
||||
}
|
||||
|
||||
void WebSocketClient::doWrite() {
|
||||
// Эта функция всегда вызывается под мьютексом или из колбэка
|
||||
if (writeQueue_.empty()) {
|
||||
isWriting_ = false;
|
||||
// Lock-free: take next message from consumer buffer; swap buffers if drained
|
||||
WriteBuf* c = writeConsumerBuf_.load(std::memory_order_acquire);
|
||||
if (currentWriteBuf_ == nullptr || currentWriteIndex_ >= currentWriteBuf_->size()) {
|
||||
if (currentWriteBuf_) {
|
||||
currentWriteBuf_->clear();
|
||||
}
|
||||
currentWriteBuf_ = c;
|
||||
if (currentWriteBuf_->empty()) {
|
||||
WriteBuf* p = writeProducerBuf_.exchange(currentWriteBuf_, std::memory_order_acq_rel);
|
||||
writeConsumerBuf_.store(p, std::memory_order_release);
|
||||
currentWriteBuf_ = p;
|
||||
}
|
||||
currentWriteIndex_ = 0;
|
||||
}
|
||||
if (currentWriteIndex_ >= currentWriteBuf_->size()) {
|
||||
isWriting_.store(false, std::memory_order_release);
|
||||
return;
|
||||
}
|
||||
|
||||
isWriting_ = true;
|
||||
auto message = writeQueue_.front();
|
||||
std::shared_ptr<std::string> message = (*currentWriteBuf_)[currentWriteIndex_++];
|
||||
|
||||
// Захватываем self (shared_from_this), чтобы объект не удалился во время записи
|
||||
ws_->async_write(
|
||||
boost::asio::buffer(*message),
|
||||
[this, message](boost::beast::error_code ec, std::size_t) {
|
||||
if (ec) {
|
||||
connected = false;
|
||||
isWriting_.store(false, std::memory_order_release);
|
||||
return;
|
||||
}
|
||||
|
||||
std::lock_guard<std::mutex> lock(writeMutex_);
|
||||
writeQueue_.pop(); // Удаляем отправленное сообщение
|
||||
doWrite(); // Проверяем следующее
|
||||
doWrite();
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
@ -1,9 +1,11 @@
|
||||
#pragma once
|
||||
#pragma once
|
||||
|
||||
#ifdef NETWORK
|
||||
|
||||
#include "WebSocketClientBase.h"
|
||||
#include <queue>
|
||||
#include <vector>
|
||||
#include <atomic>
|
||||
#include <memory>
|
||||
#include <boost/beast/core.hpp>
|
||||
#include <boost/beast/websocket.hpp>
|
||||
#include <boost/asio/connect.hpp>
|
||||
@ -11,21 +13,30 @@
|
||||
|
||||
namespace ZL {
|
||||
|
||||
// Lock-free SPSC double-buffer: producer pushes to one buffer, consumer swaps and drains the other.
|
||||
// No mutexes; avoids contention under high message load.
|
||||
class WebSocketClient : public WebSocketClientBase {
|
||||
private:
|
||||
// Переиспользуем io_context из TaskManager
|
||||
boost::asio::io_context& ioc_;
|
||||
|
||||
// Объекты переехали в члены класса
|
||||
std::unique_ptr<boost::beast::websocket::stream<boost::beast::tcp_stream>> ws_;
|
||||
boost::beast::flat_buffer buffer_;
|
||||
|
||||
std::queue<std::string> messageQueue;
|
||||
std::mutex queueMutex; // Защита для messageQueue
|
||||
// Incoming messages: I/O thread pushes, main thread drains in Poll()
|
||||
using MessageBuf = std::vector<std::string>;
|
||||
MessageBuf readBuffer0_;
|
||||
MessageBuf readBuffer1_;
|
||||
std::atomic<MessageBuf*> readProducerBuf_;
|
||||
std::atomic<MessageBuf*> readConsumerBuf_;
|
||||
|
||||
std::queue<std::shared_ptr<std::string>> writeQueue_;
|
||||
bool isWriting_ = false;
|
||||
std::mutex writeMutex_; // Отдельный мьютекс для очереди записи
|
||||
// Outgoing messages: main thread pushes in Send(), doWrite()/completion drains
|
||||
using WriteBuf = std::vector<std::shared_ptr<std::string>>;
|
||||
WriteBuf writeBuffer0_;
|
||||
WriteBuf writeBuffer1_;
|
||||
std::atomic<WriteBuf*> writeProducerBuf_;
|
||||
std::atomic<WriteBuf*> writeConsumerBuf_;
|
||||
WriteBuf* currentWriteBuf_ = nullptr;
|
||||
size_t currentWriteIndex_ = 0;
|
||||
std::atomic<bool> isWriting_{ false };
|
||||
|
||||
bool connected = false;
|
||||
|
||||
@ -34,7 +45,13 @@ namespace ZL {
|
||||
void processIncomingMessage(const std::string& msg);
|
||||
|
||||
public:
|
||||
explicit WebSocketClient(boost::asio::io_context& ioc) : ioc_(ioc) {}
|
||||
explicit WebSocketClient(boost::asio::io_context& ioc)
|
||||
: ioc_(ioc)
|
||||
, readProducerBuf_(&readBuffer0_)
|
||||
, readConsumerBuf_(&readBuffer1_)
|
||||
, writeProducerBuf_(&writeBuffer0_)
|
||||
, writeConsumerBuf_(&writeBuffer1_)
|
||||
{}
|
||||
|
||||
void Connect(const std::string& host, uint16_t port) override;
|
||||
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
#ifdef NETWORK
|
||||
#ifdef NETWORK
|
||||
|
||||
#include "WebSocketClientBase.h"
|
||||
#include <iostream>
|
||||
@ -70,10 +70,7 @@ namespace ZL {
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
std::lock_guard<std::mutex> bLock(boxesMutex);
|
||||
serverBoxes_ = std::move(parsedBoxes);
|
||||
}
|
||||
serverBoxes_ = std::move(parsedBoxes);
|
||||
return;
|
||||
}
|
||||
if (msg.rfind("RESPAWN_ACK:", 0) == 0) {
|
||||
@ -81,14 +78,8 @@ namespace ZL {
|
||||
if (parts.size() >= 2) {
|
||||
try {
|
||||
int respawnedPlayerId = std::stoi(parts[1]);
|
||||
{
|
||||
std::lock_guard<std::mutex> rLock(respawnMutex_);
|
||||
pendingRespawns_.push_back(respawnedPlayerId);
|
||||
}
|
||||
{
|
||||
std::lock_guard<std::mutex> pLock(playersMutex);
|
||||
remotePlayers.erase(respawnedPlayerId);
|
||||
}
|
||||
pendingRespawns_.push_back(respawnedPlayerId);
|
||||
remotePlayers.erase(respawnedPlayerId);
|
||||
std::cout << "Client: Received RESPAWN_ACK for player " << respawnedPlayerId << std::endl;
|
||||
}
|
||||
catch (...) {}
|
||||
@ -110,10 +101,7 @@ namespace ZL {
|
||||
);
|
||||
destruction.destroyedBy = std::stoi(parts[6]);
|
||||
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(boxDestructionsMutex_);
|
||||
pendingBoxDestructions_.push_back(destruction);
|
||||
}
|
||||
pendingBoxDestructions_.push_back(destruction);
|
||||
|
||||
std::cout << "Client: Received BOX_DESTROYED for box " << destruction.boxIndex
|
||||
<< " destroyed by player " << destruction.destroyedBy << std::endl;
|
||||
@ -146,7 +134,6 @@ namespace ZL {
|
||||
pi.rotation = q.toRotationMatrix();
|
||||
|
||||
pi.velocity = std::stof(parts[10]);
|
||||
std::lock_guard<std::mutex> pl(projMutex_);
|
||||
pendingProjectiles_.push_back(pi);
|
||||
}
|
||||
catch (...) {}
|
||||
@ -168,7 +155,6 @@ namespace ZL {
|
||||
);
|
||||
di.killerId = std::stoi(parts[6]);
|
||||
|
||||
std::lock_guard<std::mutex> dl(deathsMutex_);
|
||||
pendingDeaths_.push_back(di);
|
||||
}
|
||||
catch (...) {}
|
||||
@ -215,9 +201,7 @@ namespace ZL {
|
||||
}
|
||||
|
||||
{
|
||||
std::lock_guard<std::mutex> pLock(playersMutex);
|
||||
auto& rp = remotePlayers[remoteId];
|
||||
|
||||
rp.add_state(remoteState);
|
||||
}
|
||||
}
|
||||
@ -243,10 +227,7 @@ namespace ZL {
|
||||
// Используем твой handle_full_sync, начиная со 2-го индекса (пропускаем ID в playerParts)
|
||||
remoteState.handle_full_sync(playerParts, 1);
|
||||
|
||||
{
|
||||
std::lock_guard<std::mutex> pLock(playersMutex);
|
||||
remotePlayers[rId].add_state(remoteState);
|
||||
}
|
||||
remotePlayers[rId].add_state(remoteState);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -263,30 +244,26 @@ namespace ZL {
|
||||
}
|
||||
|
||||
std::vector<ProjectileInfo> WebSocketClientBase::getPendingProjectiles() {
|
||||
std::lock_guard<std::mutex> lock(projMutex_);
|
||||
auto copy = pendingProjectiles_;
|
||||
pendingProjectiles_.clear();
|
||||
std::vector<ProjectileInfo> copy;
|
||||
copy.swap(pendingProjectiles_);
|
||||
return copy;
|
||||
}
|
||||
|
||||
std::vector<DeathInfo> WebSocketClientBase::getPendingDeaths() {
|
||||
std::lock_guard<std::mutex> lock(deathsMutex_);
|
||||
auto copy = pendingDeaths_;
|
||||
pendingDeaths_.clear();
|
||||
std::vector<DeathInfo> copy;
|
||||
copy.swap(pendingDeaths_);
|
||||
return copy;
|
||||
}
|
||||
|
||||
std::vector<int> WebSocketClientBase::getPendingRespawns() {
|
||||
std::lock_guard<std::mutex> lock(respawnMutex_);
|
||||
auto copy = pendingRespawns_;
|
||||
pendingRespawns_.clear();
|
||||
std::vector<int> copy;
|
||||
copy.swap(pendingRespawns_);
|
||||
return copy;
|
||||
}
|
||||
|
||||
std::vector<BoxDestroyedInfo> WebSocketClientBase::getPendingBoxDestructions() {
|
||||
std::lock_guard<std::mutex> lock(boxDestructionsMutex_);
|
||||
auto copy = pendingBoxDestructions_;
|
||||
pendingBoxDestructions_.clear();
|
||||
std::vector<BoxDestroyedInfo> copy;
|
||||
copy.swap(pendingBoxDestructions_);
|
||||
return copy;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,34 +1,25 @@
|
||||
#pragma once
|
||||
#pragma once
|
||||
|
||||
#include "NetworkInterface.h"
|
||||
#include <queue>
|
||||
#include <mutex>
|
||||
#include <vector>
|
||||
#include <unordered_map>
|
||||
|
||||
namespace ZL {
|
||||
|
||||
|
||||
// All state in WebSocketClientBase is only accessed from the main thread:
|
||||
// HandlePollMessage() runs from Poll(), and get*() are called from Game/Space on the main thread.
|
||||
// No mutexes needed.
|
||||
class WebSocketClientBase : public INetworkClient {
|
||||
protected:
|
||||
|
||||
|
||||
std::unordered_map<int, ClientStateInterval> remotePlayers;
|
||||
std::mutex playersMutex;
|
||||
|
||||
// Серверные коробки
|
||||
std::vector<std::pair<Eigen::Vector3f, Eigen::Matrix3f>> serverBoxes_;
|
||||
std::mutex boxesMutex;
|
||||
|
||||
std::vector<ProjectileInfo> pendingProjectiles_;
|
||||
std::mutex projMutex_;
|
||||
|
||||
std::vector<DeathInfo> pendingDeaths_;
|
||||
std::mutex deathsMutex_;
|
||||
|
||||
std::vector<int> pendingRespawns_;
|
||||
std::mutex respawnMutex_;
|
||||
|
||||
std::vector<BoxDestroyedInfo> pendingBoxDestructions_;
|
||||
std::mutex boxDestructionsMutex_;
|
||||
int clientId = -1;
|
||||
int64_t timeOffset = 0;
|
||||
|
||||
@ -42,12 +33,10 @@ namespace ZL {
|
||||
std::string SignMessage(const std::string& msg);
|
||||
|
||||
std::unordered_map<int, ClientStateInterval> getRemotePlayers() override {
|
||||
std::lock_guard<std::mutex> lock(playersMutex);
|
||||
return remotePlayers;
|
||||
}
|
||||
|
||||
std::vector<std::pair<Eigen::Vector3f, Eigen::Matrix3f>> getServerBoxes() override {
|
||||
std::lock_guard<std::mutex> lock(boxesMutex);
|
||||
return serverBoxes_;
|
||||
}
|
||||
|
||||
|
||||
Loading…
Reference in New Issue
Block a user