/* RetroArch - A frontend for libretro.
 * Copyright (C) 2010-2014 - Hans-Kristian Arntzen
 * Copyright (C) 2011-2017 - Daniel De Matteis
 * Copyright (C) 2016-2017 - Gregor Richards
 * Copyright (C) 2021-2022 - Roberto V. Rampim
 *
 * RetroArch is free software: you can redistribute it and/or modify it under the terms
 * of the GNU General Public License as published by the Free Software Found-
 * ation, either version 3 of the License, or (at your option) any later version.
 *
 * RetroArch is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 * PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with RetroArch.
 * If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __RARCH_NETPLAY_PRIVATE_H
#define __RARCH_NETPLAY_PRIVATE_H

#include "netplay.h"
|
2022-06-17 17:38:56 -03:00
|
|
|
#include "netplay_protocol.h"
|
|
|
|
|
|
|
|
#include <libretro.h>
|
2016-05-09 20:30:47 +02:00
|
|
|
|
2016-11-26 16:06:52 -05:00
|
|
|
#include <streams/trans_stream.h>
|
2016-05-09 20:30:47 +02:00
|
|
|
|
2022-06-17 17:38:56 -03:00
|
|
|
#include "../../retroarch_types.h"
|
|
|
|
|
2022-07-26 07:15:42 -03:00
|
|
|
#ifndef VITA
#define RARCH_DEFAULT_PORT 55435
#else
#define RARCH_DEFAULT_PORT 19492
#endif
#define RARCH_DISCOVERY_PORT 55435

#define RARCH_DEFAULT_NICK "Anonymous"

#define NETPLAY_PASS_LEN 128
#define NETPLAY_PASS_HASH_LEN 64 /* length of a SHA-256 hash in hex characters */

#define NETPLAY_ANNOUNCE_AFTER 5000000
#define NETPLAY_PING_AFTER 3000000
#define NETPLAY_ANNOUNCE_TIME 20000000
#define NETPLAY_PING_TIME 3000000

#define MAX_SERVER_STALL_TIME_USEC (5*1000*1000)
#define MAX_CLIENT_STALL_TIME_USEC (10*1000*1000)
#define CATCH_UP_CHECK_TIME_USEC (500*1000)
#define MAX_RETRIES 16
#define RETRY_MS 500
#define MAX_INPUT_DEVICES 16

/* We allow only 32 clients so that they fit into a 32-bit bitmap */
#define MAX_CLIENTS 32

/* Because the callback keyboard reverses some assumptions, when the keyboard
 * callbacks are in use, we assign a pseudodevice for it */
#define RETRO_DEVICE_NETPLAY_KEYBOARD RETRO_DEVICE_SUBCLASS(RETRO_DEVICE_KEYBOARD, 65535)

#define NETPLAY_MAX_STALL_FRAMES 60
#define NETPLAY_FRAME_RUN_TIME_WINDOW 120
#define NETPLAY_MAX_REQ_STALL_TIME 60
#define NETPLAY_MAX_REQ_STALL_FREQUENCY 120

#define PREV_PTR(x) ((x) == 0 ? netplay->buffer_size - 1 : (x) - 1)
#define NEXT_PTR(x) ((x + 1) % netplay->buffer_size)
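
/* Illustrative sketch (not part of the original header): how the ring-buffer
 * macros above are typically used. Like the macros themselves, it assumes a
 * variable named netplay is in scope; the netplay_t type and its self_ptr
 * field are referenced here purely for illustration. */
#if 0
static void example_walk_buffer(netplay_t *netplay)
{
   size_t ptr  = netplay->self_ptr; /* hypothetical current slot */
   size_t next = NEXT_PTR(ptr);     /* wraps back to 0 at buffer_size */
   size_t prev = PREV_PTR(ptr);     /* wraps back to buffer_size - 1 at 0 */
   (void)next;
   (void)prev;
}
#endif
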
/* Quirks mandated by how particular cores save states. This is distilled from
 * the larger set of quirks that the quirks environment can communicate. */
#define NETPLAY_QUIRK_INITIALIZATION (1 << 0)
#define NETPLAY_QUIRK_ENDIAN_DEPENDENT (1 << 1)
#define NETPLAY_QUIRK_PLATFORM_DEPENDENT (1 << 2)

/* Compression protocols supported */
#define NETPLAY_COMPRESSION_ZLIB (1<<0)
#if HAVE_ZLIB
#define NETPLAY_COMPRESSION_SUPPORTED NETPLAY_COMPRESSION_ZLIB
#else
#define NETPLAY_COMPRESSION_SUPPORTED 0
#endif
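
/* Illustrative sketch (not part of the original header): the compression
 * capability fields exchanged between peers are bitmaps, so picking a usable
 * protocol is an intersection of what the peer advertises with what this
 * build supports. */
#if 0
static uint32_t example_common_compression(uint32_t peer_supported)
{
   uint32_t common = peer_supported & NETPLAY_COMPRESSION_SUPPORTED;
   return (common & NETPLAY_COMPRESSION_ZLIB) ? NETPLAY_COMPRESSION_ZLIB : 0;
}
#endif
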
/* The keys supported by netplay */
enum netplay_keys
{
   NETPLAY_KEY_UNKNOWN = 0,
#define K(k) NETPLAY_KEY_ ## k,
#define KL(k,l) K(k)
#include "netplay_keys.h"
#undef KL
#undef K
   NETPLAY_KEY_LAST
};
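
/* Illustrative sketch (not part of the original header): K/KL form an X-macro
 * pattern, so re-including netplay_keys.h with different definitions of K/KL
 * can generate parallel tables, e.g. a key-to-name lookup. */
#if 0
static const char *example_netplay_key_name(enum netplay_keys key)
{
   switch (key)
   {
#define K(k) case NETPLAY_KEY_ ## k: return #k;
#define KL(k,l) K(k)
#include "netplay_keys.h"
#undef KL
#undef K
      default:
         return "UNKNOWN";
   }
}
#endif
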
enum netplay_cmd
{
   /* Basic commands */

   /* Acknowledgement response */
   NETPLAY_CMD_ACK = 0x0000,

   /* Failed acknowledgement response */
   NETPLAY_CMD_NAK = 0x0001,

   /* Gracefully disconnects from host */
   NETPLAY_CMD_DISCONNECT = 0x0002,

   /* Input data */
   NETPLAY_CMD_INPUT = 0x0003,

   /* Non-input data */
   NETPLAY_CMD_NOINPUT = 0x0004,

   /* Initialization commands */

   /* Inform the other side of our nick (must be first command) */
   NETPLAY_CMD_NICK = 0x0020,

   /* Give the connection password */
   NETPLAY_CMD_PASSWORD = 0x0021,

   /* Give core/content info */
   NETPLAY_CMD_INFO = 0x0022,

   /* Initial synchronization info (frame, sram, player info) */
   NETPLAY_CMD_SYNC = 0x0023,

   /* Join spectator mode */
   NETPLAY_CMD_SPECTATE = 0x0024,

   /* Join play mode */
   NETPLAY_CMD_PLAY = 0x0025,

   /* Report player mode */
   NETPLAY_CMD_MODE = 0x0026,

   /* Report player mode refused */
   NETPLAY_CMD_MODE_REFUSED = 0x0027,

   /* Loading and synchronization */

   /* Send the CRC hash of a frame's state */
   NETPLAY_CMD_CRC = 0x0040,

   /* Request a savestate */
   NETPLAY_CMD_REQUEST_SAVESTATE = 0x0041,

   /* Send a savestate for the client to load */
   NETPLAY_CMD_LOAD_SAVESTATE = 0x0042,

   /* Pauses the game, takes no arguments */
   NETPLAY_CMD_PAUSE = 0x0043,

   /* Resumes the game, takes no arguments */
   NETPLAY_CMD_RESUME = 0x0044,

   /* Request that a client stall because it's running fast */
   NETPLAY_CMD_STALL = 0x0045,

   /* Request a core reset */
   NETPLAY_CMD_RESET = 0x0046,

   /* Sends over cheats enabled on client (unsupported) */
   NETPLAY_CMD_CHEATS = 0x0047,

   /* Misc. commands */

   /* Sends multiple config requests over;
    * see enum netplay_cmd_cfg */
   NETPLAY_CMD_CFG = 0x0061,

   /* CMD_CFG streamlines sending multiple
      configurations. This acknowledges
      each one individually */
   NETPLAY_CMD_CFG_ACK = 0x0062,

   /* Chat commands */

   /* Sends a player chat message.
    * The server is responsible for formatting/truncating
    * the message and relaying it to all playing clients,
    * including the one that sent the message. */
   NETPLAY_CMD_PLAYER_CHAT = 0x1000,

   /* Ping commands */

   /* Sends a ping command to the server/client.
    * Intended for estimating the latency between these two peers. */
   NETPLAY_CMD_PING_REQUEST = 0x1100,
   NETPLAY_CMD_PING_RESPONSE = 0x1101,

   /* Setting commands */

   /* These host settings should be honored by the client,
    * but they are not enforced. */
   NETPLAY_CMD_SETTING_ALLOW_PAUSING = 0x2000,
   NETPLAY_CMD_SETTING_INPUT_LATENCY_FRAMES = 0x2001
};
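
/* Illustrative sketch (not part of the original header), assuming the framing
 * commonly used for these commands: a 32-bit command word followed by a 32-bit
 * payload size, both in network byte order, then the payload itself. htonl()
 * is assumed to be available via the usual socket headers. */
#if 0
static void example_frame_command(uint32_t cmd, uint32_t payload_size,
      uint32_t header_out[2])
{
   header_out[0] = htonl(cmd);          /* e.g. NETPLAY_CMD_NICK */
   header_out[1] = htonl(payload_size); /* number of payload bytes that follow */
}
#endif
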
#define NETPLAY_CMD_SYNC_BIT_PAUSED (1U<<31)
#define NETPLAY_CMD_PLAY_BIT_SLAVE (1U<<31)
#define NETPLAY_CMD_MODE_BIT_YOU (1U<<31)
#define NETPLAY_CMD_MODE_BIT_PLAYING (1U<<30)
#define NETPLAY_CMD_MODE_BIT_SLAVE (1U<<29)
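
/* Illustrative sketch (not part of the original header): the mode word in a
 * NETPLAY_CMD_MODE payload packs its flags into the high bits, so decoding is
 * a set of simple bit tests; how the remaining bits are used is defined by the
 * protocol implementation, not assumed here. */
#if 0
static void example_decode_mode_word(uint32_t mode_word)
{
   bool about_me = (mode_word & NETPLAY_CMD_MODE_BIT_YOU)     != 0;
   bool playing  = (mode_word & NETPLAY_CMD_MODE_BIT_PLAYING) != 0;
   bool slave    = (mode_word & NETPLAY_CMD_MODE_BIT_SLAVE)   != 0;
   (void)about_me;
   (void)playing;
   (void)slave;
}
#endif
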
/* These are the reasons given for mode changes to be rejected */
enum netplay_cmd_mode_reasons
{
   /* Other/unknown reason */
   NETPLAY_CMD_MODE_REFUSED_REASON_OTHER,

   /* You don't have permission to play */
   NETPLAY_CMD_MODE_REFUSED_REASON_UNPRIVILEGED,

   /* There are no free player slots */
   NETPLAY_CMD_MODE_REFUSED_REASON_NO_SLOTS,

   /* You're changing modes too fast */
   NETPLAY_CMD_MODE_REFUSED_REASON_TOO_FAST,

   /* You requested a particular port but it's not available */
   NETPLAY_CMD_MODE_REFUSED_REASON_NOT_AVAILABLE
};

/* Real preferences for sharing devices */
enum rarch_netplay_share_preference
{
   /* Prefer not to share; shouldn't be set
    * as a sharing mode for a shared device */
   NETPLAY_SHARE_NO_SHARING = 0x00,

   /* No preference. Only for requests.
    * Set if sharing is requested but either
    * digital or analog doesn't have a preference. */
   NETPLAY_SHARE_NO_PREFERENCE = 0x01,

   /* For digital devices */
   NETPLAY_SHARE_DIGITAL_BITS = 0x1C,
   NETPLAY_SHARE_DIGITAL_OR = 0x04,
   NETPLAY_SHARE_DIGITAL_XOR = 0x08,
   NETPLAY_SHARE_DIGITAL_VOTE = 0x0C,

   /* For analog devices */
   NETPLAY_SHARE_ANALOG_BITS = 0xE0,
   NETPLAY_SHARE_ANALOG_MAX = 0x20,
   NETPLAY_SHARE_ANALOG_AVERAGE = 0x40
};
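
/* Illustrative sketch (not part of the original header): digital and analog
 * sharing modes are packed into a single preference value, so each half is
 * masked out with its _BITS constant before comparing against a specific
 * mode. */
#if 0
static bool example_is_digital_vote(enum rarch_netplay_share_preference pref)
{
   return (pref & NETPLAY_SHARE_DIGITAL_BITS) == NETPLAY_SHARE_DIGITAL_VOTE;
}

static bool example_is_analog_average(enum rarch_netplay_share_preference pref)
{
   return (pref & NETPLAY_SHARE_ANALOG_BITS) == NETPLAY_SHARE_ANALOG_AVERAGE;
}
#endif
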
enum rarch_netplay_stall_reason
{
   NETPLAY_STALL_NONE = 0,

   /* We're so far ahead that we can't read
    * more data without overflowing the buffer */
   NETPLAY_STALL_RUNNING_FAST,

   /* We're in spectator or slave mode
    * and are running ahead at all */
   NETPLAY_STALL_SPECTATOR_WAIT,

   /* Our actual execution is catching up
    * with latency-adjusted input frames */
   NETPLAY_STALL_INPUT_LATENCY,

   /* The server asked us to stall */
   NETPLAY_STALL_SERVER_REQUESTED
};

/* Input state for a particular client-device pair */
typedef struct netplay_input_state
{
   /* The next input state (forming a list) */
   struct netplay_input_state *next;

   /* Whose data is this? */
   uint32_t client_num;

   /* How many words of input data do we have? */
   uint32_t size;

   /* Is this a buffer with real data? */
   bool used;

   /* The input data itself (note: should expand
    * beyond 1 by overallocating). */
   uint32_t data[1];

   /* Warning: No members allowed past this point,
    * due to dynamic resizing. */
} *netplay_input_state_t;
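
/* Illustrative sketch (not part of the original header): because data[1] is a
 * flexible-array-style tail, a state that holds more than one word is created
 * by over-allocating the struct; calloc() from stdlib.h is assumed here. */
#if 0
static netplay_input_state_t example_alloc_input_state(uint32_t words)
{
   netplay_input_state_t state = (netplay_input_state_t)
      calloc(1, sizeof(*state) + (words - 1) * sizeof(uint32_t));
   if (state)
      state->size = words;
   return state;
}
#endif
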
struct delta_frame
{
   /* The resolved input, i.e., what's actually
    * going to the core. One input per device. */
   netplay_input_state_t resolved_input[MAX_INPUT_DEVICES]; /* ptr alignment */

   /* The real input */
   netplay_input_state_t real_input[MAX_INPUT_DEVICES]; /* ptr alignment */

   /* The simulated input. is_real here means the simulation is done, i.e.,
    * it's a real simulation, not real input. */
   netplay_input_state_t simulated_input[MAX_INPUT_DEVICES];

   /* The serialized state of the core at this frame, before input */
   void *state;

   uint32_t frame;

   /* The CRC-32 of the serialized state if we've calculated it, else 0 */
   uint32_t crc;

   /* Have we read local input? */
   bool have_local;

   /* Have we read the real (remote) input? */
   bool have_real[MAX_CLIENTS];

   /* A bit derpy, but this is how we know if the delta
    * has been used at all. */
   bool used;
};

struct socket_buffer
{
   unsigned char *data;
   size_t bufsz;
   size_t start;
   size_t end;
   size_t read;
};
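
/* Illustrative sketch (not part of the original header): start and end index
 * into a circular buffer of bufsz bytes, so the amount of buffered data is the
 * wrapped distance between them. The exact roles of start, end and read are
 * defined by the implementation, not assumed here. */
#if 0
static size_t example_buffered_bytes(const struct socket_buffer *sbuf)
{
   if (sbuf->end >= sbuf->start)
      return sbuf->end - sbuf->start;
   return sbuf->bufsz - sbuf->start + sbuf->end;
}
#endif
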
/* We do it like this instead of using sockaddr_storage
 * in order to have relay server IPv6 support on platforms
 * that do not support IPv6. */
typedef struct netplay_address
{
   /* Can hold an IPv6 address as well as an IPv4 address in the
    * ::ffff:a.b.c.d format. */
   uint8_t addr[16];
} netplay_address_t;
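
/* Illustrative sketch (not part of the original header): storing an IPv4
 * address in the 16-byte field using the ::ffff:a.b.c.d mapped form described
 * above; memset()/memcpy() from string.h are assumed here. */
#if 0
static void example_store_ipv4(netplay_address_t *out, const uint8_t ipv4[4])
{
   memset(out->addr, 0, 10);        /* leading zero bytes */
   out->addr[10] = 0xFF;            /* ::ffff: prefix */
   out->addr[11] = 0xFF;
   memcpy(&out->addr[12], ipv4, 4); /* a.b.c.d */
}
#endif
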
/* Each connection gets a connection struct */
struct netplay_connection
{
   /* Timer used to estimate a connection's latency */
   retro_time_t ping_timer;

   /* Connection's address */
   netplay_address_t addr;

   /* Buffers for sending and receiving data */
   struct socket_buffer send_packet_buffer;
   struct socket_buffer recv_packet_buffer;

   /* What compression does this peer support? */
   uint32_t compression_supported;

   /* Salt associated with password transaction */
   uint32_t salt;

   /* Which netplay protocol is this connection running? */
   uint32_t netplay_protocol;

   /* If the mode is a DELAYED_DISCONNECT or SPECTATOR,
    * the transmission of the mode change may have to
    * wait for data to be forwarded.
    * This is the frame to wait for, or 0 if no delay
    * is active. */
   uint32_t delay_frame;

   /* For the server: When was the last time we requested
    * this client to stall?
    * For the client: How many frames of stall do we have left? */
   uint32_t stall_frame;

   /* How many times has this connection caused a stall because it's running
    * too slow? */
   uint32_t stall_slow;

   /* What latency is this connection running on?
    * Network latency has limited precision as we estimate it
    * once every pre-frame. */
   int32_t ping;

   /* fd associated with this connection */
   int fd;

   /* Mode of the connection */
   enum rarch_netplay_connection_mode mode;

   /* Is this connection stalling? */
   enum rarch_netplay_stall_reason stall;

   /* Nickname of peer */
   char nick[NETPLAY_NICK_LEN];

   /* Is this connection buffer in use? */
   bool active;

   /* Is this player paused? */
   bool paused;

   /* Is this connection allowed to play (server only)? */
   bool can_play;

   /* Did we request a ping response? */
   bool ping_requested;
};

/* Compression transcoder */
struct compression_transcoder
{
   const struct trans_stream_backend *compression_backend;
   const struct trans_stream_backend *decompression_backend;
   void *compression_stream;
   void *decompression_stream;
};

typedef struct mitm_id
{
   uint32_t magic;
   uint8_t unique[12];
} mitm_id_t;
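
/* Illustrative sketch (not part of the original header): a tunnel session
 * request is this 16-byte structure with the unique field zeroed; the magic
 * parameter is a placeholder assumption, not the actual constant used by the
 * tunnel protocol. memset() from string.h is assumed here. */
#if 0
static void example_new_session_request(mitm_id_t *req, uint32_t session_magic)
{
   req->magic = session_magic;                  /* protocol-defined tag */
   memset(req->unique, 0, sizeof(req->unique)); /* zeroed id requests a new session */
}
#endif
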
#define NETPLAY_MITM_MAX_PENDING 8

struct netplay_mitm_handler
{
   struct
   {
      retro_time_t timeout;
      mitm_id_t id;
      netplay_address_t addr;
      int fd;
      bool has_addr;
   } pending[NETPLAY_MITM_MAX_PENDING];

   mitm_id_t id_buf;
   netplay_address_t addr_buf;
   struct addrinfo *base_addr;
   const struct addrinfo *addr;
   size_t id_recvd;
   size_t addr_recvd;
};

struct netplay_ban_list
{
   netplay_address_t *list;
   size_t size;
   size_t allocated;
|
Netplay Stuff (#13375)
* Netplay Stuff
## PROTOCOL FALLBACK
In order to support older clients a protocol fallback system was introduced.
The host will no longer send its header automatically after a TCP connection is established, instead, it awaits for the client to send his before determining which protocol this connection is going to operate on.
Netplay has now two protocols, a low protocol and a high protocol; the low protocol is the minimum protocol it supports, while the high protocol is the highest protocol it can operate on.
To fully support older clients, a hack was necessary: sending the high protocol in the unused client's header salt field, while keeping the protocol field to the low protocol. Without this hack we would only be able to support older clients if a newer client was the host.
Any future system can make use of this system by checking connection->netplay_protocol, which is available for both the client and host.
## NETPLAY CHAT
Starting with protocol 6, netplay chat is available through the new NETPLAY_CMD_PLAYER_CHAT command.
Limitations of the command code, which causes a disconnection on unknown commands, makes this system not possible on protocol 5.
Protocol 5 connections can neither send nor receive chat, but other netplay operations are unaffected.
Clients send chat as a string to the server, and it's the server's sole responsability to relay chat messages.
As of now, sending chat uses RetroArch's input menu, while the display of on-screen chat uses a widget overlay and RetroArch's notifications as a fallback.
If a new overlay and/or input system is desired, no backwards compatibility changes need to be made.
Only clients in playing mode (as opposed to spectating mode) can send and receive chat.
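As a minimal sketch of the relay responsibility described above, a server could forward a received chat string with netplay_send_raw_cmd_all() (declared later in this header); the payload layout shown here, just the raw message bytes, is an assumption.

```c
/* Hedged sketch: the server relays a received chat string to every
 * other connection. The payload format is assumed to be the plain
 * message text; the real command layout may differ. */
static void relay_player_chat(netplay_t *netplay,
      struct netplay_connection *from, const char *msg, size_t len)
{
   /* Exclude the sender; it already has its own message. */
   netplay_send_raw_cmd_all(netplay, from, NETPLAY_CMD_PLAYER_CHAT,
         msg, len);
}
```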
## SETTINGS SHARING
Some settings are better used when both host and clients share the same configuration.
As of protocol 6, the following settings will be shared from host to clients (without altering a client's configuration file): input latency frames and allow pausing.
## NETPLAY TUNNEL/MITM
With the current MITM system defunct (at least as of 1.9.X), a new system was needed to solve most, if not all, of the problems with the current system.
This new system uses a tunneling approach, which is similar to most VPN and tunneling services around.
Tunnel commands:
RATS[unique id] (RetroArch Tunnel Session) - 16 bytes -> When this command is sent with a zeroed unique id, the tunnel server interprets it as a netplay host wanting to create a new session; in this case, the same command is returned to the host, but now with its unique session id. When a client needs to connect to a host, this command is sent with the host's unique session id, causing the tunnel server to send a RATL command to the host.
RATL[unique id] (RetroArch Tunnel Link) - 16 bytes -> The tunnel server sends this command to the host when a client wants to connect to it. Once the host receives this command, it establishes a new connection to the tunnel server, sending this command together with the client's unique id through this new connection, causing the tunnel server to link this connection to the client's connection.
RATP (RetroArch Tunnel Ping) - 4 bytes -> The tunnel server sends this command to verify that the host to whom the session belongs is still around. The host replies with the same command. A session is closed if the tunnel server cannot verify that the host is alive.
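The sizes above imply a 4-byte command magic plus a 12-byte unique id for RATS/RATL (16 bytes in total), and a bare 4-byte magic for RATP. Below is a hedged sketch of that framing; the struct and constant names, and the exact magic values, are assumptions derived from the ASCII command names, not the real definitions.

```c
/* Hedged sketch: tunnel command framing as described above. */
#include <stdint.h>

#define TUNNEL_MAGIC_RATS 0x52415453 /* "RATS": session request/reply */
#define TUNNEL_MAGIC_RATL 0x5241544C /* "RATL": link request          */
#define TUNNEL_MAGIC_RATP 0x52415450 /* "RATP": keep-alive ping       */

#pragma pack(push, 1)
typedef struct
{
   uint32_t magic;      /* one of the RAT* magics, in network byte order */
   uint8_t  unique[12]; /* session id; all zeroes when requesting one    */
} tunnel_session_cmd_t; /* 16 bytes for RATS/RATL; RATP is magic only    */
#pragma pack(pop)
```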
Operations:
Host -> Instead of listening for and accepting connections, it connects to the tunnel server, requests a new session, and then monitors this connection for new linking requests. Once a request is received, it establishes a new connection to the tunnel server for linking with a client. The tunnel server's address and port are obtained by querying the lobby server. The host publishes its session id together with the rest of its info to the lobby server.
Client -> It connects to the tunnel server and then sends the session id of the host it wants to connect to. A host's session id is obtained from the JSON data sent by the lobby server.
Improvements (over the current MITM system):
No longer a risk of TCP port exhaustion; we now use only one port at the tunnel server.
Very little CPU usage; it is now about 95% net I/O bound.
Compatible with any and all future changes to netplay, as it no longer runs any netplay logic at MITM servers.
No longer operates the host in client mode, which was a source of many of the current problems.
Cleaner and more maintainable system and code.
Notable functions:
netplay_mitm_query -> Grabs the tunnel's address and port from the lobby server.
init_tcp_socket -> Handles the creation and operation mode of the TCP socket based on whether it's host, host+MITM or client.
handle_mitm_connection -> Creates and completes linking connections and replies to ping commands (only one of each per call, so as not to affect performance).
## MISC
Ping Limiter: If a client's estimated latency to the server is higher than this value, the connection is dropped just before the netplay handshake finishes.
Ping Counter: A ping counter (similar to the FPS one) can be shown in the bottom-right corner of the screen when you are connected to a host.
LAN Discovery: Refactored and moved to its own "Refresh Netplay LAN List" button.
## FIXES
Many minor fixes to the current netplay implementation are also included.
* Remove NETPLAY_TEST_BUILD
2021-12-19 12:58:01 -03:00
|
|
|
};
|
|
|
|
|
2022-06-17 17:38:56 -03:00
|
|
|
struct netplay_chat
|
|
|
|
{
|
|
|
|
struct
|
|
|
|
{
|
|
|
|
uint32_t frames;
|
|
|
|
char nick[NETPLAY_NICK_LEN];
|
|
|
|
char msg[NETPLAY_CHAT_MAX_SIZE];
|
|
|
|
} messages[NETPLAY_CHAT_MAX_MESSAGES];
|
|
|
|
};
|
|
|
|
|
2015-12-23 13:25:28 -07:00
|
|
|
struct netplay
|
|
|
|
{
|
2022-06-17 17:38:56 -03:00
|
|
|
/* We stall if we're far enough ahead that we
|
2021-11-06 00:27:33 +01:00
|
|
|
* couldn't transparently rewind.
|
2022-06-17 17:38:56 -03:00
|
|
|
* To know if we could transparently rewind,
|
2021-11-06 00:27:33 +01:00
|
|
|
* we need to know how long running a frame takes.
|
|
|
|
* We record that every frame and get a running (window) average. */
|
2020-08-15 18:53:52 +02:00
|
|
|
retro_time_t frame_run_time[NETPLAY_FRAME_RUN_TIME_WINDOW];
|
2022-06-17 17:38:56 -03:00
|
|
|
retro_time_t frame_run_time_sum;
|
|
|
|
retro_time_t frame_run_time_avg;
|
|
|
|
|
|
|
|
/* When did we start falling behind? */
|
|
|
|
retro_time_t catch_up_time;
|
|
|
|
/* How long have we been stalled? */
|
|
|
|
retro_time_t stall_time;
|
2017-05-16 00:15:06 -05:00
|
|
|
|
2022-09-17 20:15:19 -03:00
|
|
|
retro_time_t next_announce;
|
|
|
|
retro_time_t next_ping;
|
|
|
|
|
2021-12-25 09:42:22 -03:00
|
|
|
struct retro_callbacks cbs;
|
2016-12-09 14:14:54 -05:00
|
|
|
|
2021-12-25 09:42:22 -03:00
|
|
|
/* Compression transcoder */
|
2022-06-17 17:38:56 -03:00
|
|
|
struct compression_transcoder compress_nil;
|
|
|
|
struct compression_transcoder compress_zlib;
|
2016-12-05 00:04:01 -05:00
|
|
|
|
2021-12-25 09:42:22 -03:00
|
|
|
/* MITM session id */
|
|
|
|
mitm_id_t mitm_session_id;
|
2016-12-03 23:08:31 -05:00
|
|
|
|
2022-07-07 08:40:38 -03:00
|
|
|
/* Banned addresses */
|
|
|
|
struct netplay_ban_list ban_list;
|
|
|
|
|
2022-06-17 17:38:56 -03:00
|
|
|
/* Chat messages */
|
|
|
|
struct netplay_chat chat;
|
2017-08-25 14:38:21 -04:00
|
|
|
|
2021-12-25 09:42:22 -03:00
|
|
|
/* MITM connection handler */
|
2022-07-07 08:40:38 -03:00
|
|
|
struct netplay_mitm_handler *mitm_handler;
|
2017-09-13 11:39:41 -04:00
|
|
|
|
2022-06-17 17:38:56 -03:00
|
|
|
/* All of our connections */
|
|
|
|
struct netplay_connection *connections;
|
2016-12-09 14:14:54 -05:00
|
|
|
|
2015-12-23 13:25:28 -07:00
|
|
|
struct delta_frame *buffer;
|
2020-08-15 18:53:52 +02:00
|
|
|
|
2016-10-30 14:27:43 -04:00
|
|
|
/* A buffer into which to compress frames for transfer */
|
|
|
|
uint8_t *zbuffer;
|
|
|
|
|
2021-12-25 09:42:22 -03:00
|
|
|
size_t connections_size;
|
|
|
|
size_t buffer_size;
|
|
|
|
size_t zbuffer_size;
|
2016-12-09 14:14:54 -05:00
|
|
|
/* The size of our packet buffers */
|
|
|
|
size_t packet_buffer_size;
|
2021-12-25 09:42:22 -03:00
|
|
|
/* Size of savestates */
|
|
|
|
size_t state_size;
|
2016-12-09 14:14:54 -05:00
|
|
|
|
2017-01-18 16:07:17 -05:00
|
|
|
/* The frame we're currently inputting */
|
2017-01-20 14:28:18 -05:00
|
|
|
size_t self_ptr;
|
2022-06-17 17:38:56 -03:00
|
|
|
/* The frame we're currently running, which may be
|
2021-11-06 00:27:33 +01:00
|
|
|
* behind the frame we're currently inputting if
|
|
|
|
* we're using input latency */
|
2017-01-18 16:07:17 -05:00
|
|
|
size_t run_ptr;
|
2016-12-09 22:37:50 -05:00
|
|
|
/* The first frame at which some data might be unreliable */
|
2015-12-23 13:25:28 -07:00
|
|
|
size_t other_ptr;
|
2022-06-17 17:38:56 -03:00
|
|
|
/* Pointer to the first frame for which we're missing
|
2021-11-06 00:27:33 +01:00
|
|
|
* the data of at least one connected player excluding ourself.
|
2022-06-17 17:38:56 -03:00
|
|
|
* Generally, other_ptr <= unread_ptr <= self_ptr,
|
|
|
|
* but unread_ptr can get ahead of self_ptr if the peer
|
2021-11-06 00:27:33 +01:00
|
|
|
* is running fast. */
|
2016-12-10 20:36:57 -05:00
|
|
|
size_t unread_ptr;
|
2017-08-25 14:38:21 -04:00
|
|
|
/* Pointer to the next frame to read from each client */
|
2017-09-11 10:40:34 -04:00
|
|
|
size_t read_ptr[MAX_CLIENTS];
|
2022-06-17 17:38:56 -03:00
|
|
|
/* Pointer to the next frame to read from the server
|
2021-11-06 00:27:33 +01:00
|
|
|
* (as it might not be a player but still synchronizes)
|
|
|
|
*/
|
2016-12-10 20:36:57 -05:00
|
|
|
size_t server_ptr;
|
Multitudinous fixes and updates to Netplay. Had to be one commit since
they're mostly related:
(1) Renamed frame_count to self_frame_count to be consistent with all
other names.
(2) Previously, it was possible to overwrite data in the ring buffer
that hadn't yet been used. Now that's not possible, but that just
trades one breakage for another: it's now possible to miss the NEW
data. The final resolution for this will probably be requesting stalls.
This is accomplished simply by storing frame numbers in the ring buffer
and checking them against the 'other' head.
(3) In TCP packets, separated cmd_size from cmd. It was beyond pointless
for these to be combined, and combining them restricted cmd_size to 16
bits, which would probably fail when/if state loading is supported.
(4) Readahead is now allowed. In the past, if the peer got ahead of us,
we would simply ignore their data. Thus, if they got too far ahead of
us, we'd stop reading their data altogether. Fabulous. Now, we're happy
to read future input.
(5) If the peer gets too far ahead of us (currently an unconfigurable 10
frames), fast forward to catch up. This should prevent desync due to
clock drift or stutter.
(6) Used frame_count in a few places where ptr was used. Doing a
comparison of pointers on a ring buffer is a far more dangerous way to
assure we're done with a task than simply using the count, since the
ring buffer is... well, a ring.
(7) Renamed tmp_{ptr,frame_count} to replay_{ptr,frame_count} for
clarity.
(8) Slightly changed the protocol version hash, just to assure that
other clients wouldn't think they were compatible with this one.
(9) There was an off-by-one error which, under some circumstances, could
allow the replay engine to run a complete round through the ring buffer,
replaying stale data. Fixed.
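A hedged sketch of the check described in point (2): before recycling a ring-buffer slot, compare the frame number stored in it with the 'other' head. The 'used' and 'frame' members are assumed names for what each delta_frame slot records; buffer and other_frame_count are struct netplay members declared in this header.

/* Hedged sketch only; not the actual implementation. */
static bool netplay_slot_reusable(netplay_t *netplay, size_t ptr)
{
   struct delta_frame *delta = &netplay->buffer[ptr];

   /* A slot is safe to recycle if it was never filled, or if the frame
    * it holds is already behind everything the 'other' head still needs. */
   return !delta->used || delta->frame < netplay->other_frame_count;
}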
2016-09-11 22:01:47 -04:00
|
|
|
/* A pointer used temporarily for replay. */
|
|
|
|
size_t replay_ptr;
|
2021-12-25 09:42:22 -03:00
|
|
|
|
|
|
|
/* Pseudo random seed */
|
|
|
|
unsigned long simple_rand_next;
|
|
|
|
|
2022-08-11 18:17:33 -03:00
|
|
|
/* Quirks in the savestate implementation */
|
|
|
|
uint32_t quirks;
|
|
|
|
|
2021-12-25 09:42:22 -03:00
|
|
|
/* Our client number */
|
|
|
|
uint32_t self_client_num;
|
|
|
|
|
|
|
|
/* Bitmap of clients with input devices */
|
|
|
|
uint32_t connected_players;
|
|
|
|
|
|
|
|
/* Bitmap of clients playing in slave mode (should be a subset of
|
|
|
|
* connected_players) */
|
|
|
|
uint32_t connected_slaves;
|
|
|
|
|
|
|
|
/* For each client, the bitmap of devices they're connected to */
|
|
|
|
uint32_t client_devices[MAX_CLIENTS];
|
|
|
|
|
|
|
|
/* For each device, the bitmap of clients connected */
|
2022-06-17 17:38:56 -03:00
|
|
|
uint32_t device_clients[MAX_INPUT_DEVICES];
|
2021-12-25 09:42:22 -03:00
|
|
|
|
|
|
|
/* Our own device bitmap */
|
|
|
|
uint32_t self_devices;
|
|
|
|
|
|
|
|
/* The device types for every connected device.
|
|
|
|
* We store them and ignore any menu changes,
|
|
|
|
* as netplay needs fixed devices. */
|
|
|
|
uint32_t config_devices[MAX_INPUT_DEVICES];
|
|
|
|
|
|
|
|
uint32_t self_frame_count;
|
|
|
|
uint32_t run_frame_count;
|
|
|
|
uint32_t other_frame_count;
|
|
|
|
uint32_t unread_frame_count;
|
|
|
|
uint32_t read_frame_count[MAX_CLIENTS];
|
|
|
|
uint32_t server_frame_count;
|
2016-12-09 22:37:50 -05:00
|
|
|
uint32_t replay_frame_count;
|
2015-12-23 13:25:28 -07:00
|
|
|
|
2022-08-13 17:28:43 -03:00
|
|
|
/* Frequency with which to check CRCs */
|
|
|
|
uint32_t check_frames;
|
|
|
|
|
2022-06-17 17:38:56 -03:00
|
|
|
/* How far behind did we fall? */
|
|
|
|
uint32_t catch_up_behind;
|
|
|
|
|
|
|
|
/* Number of desync operations we're currently performing.
|
|
|
|
* If set, we don't attempt to stay in sync. */
|
|
|
|
uint32_t desync;
|
|
|
|
|
|
|
|
/* Host settings */
|
|
|
|
int32_t input_latency_frames_min;
|
|
|
|
int32_t input_latency_frames_max;
|
2020-08-15 18:53:52 +02:00
|
|
|
|
2022-06-17 17:38:56 -03:00
|
|
|
/* TCP connection for listening (server only) */
|
|
|
|
int listen_fd;
|
|
|
|
|
|
|
|
int frame_run_time_ptr;
|
|
|
|
|
2021-11-06 00:27:33 +01:00
|
|
|
/* Latency frames; positive to hide network latency,
|
|
|
|
* negative to hide input latency */
|
2020-08-15 18:53:52 +02:00
|
|
|
int input_latency_frames;
|
|
|
|
|
2022-06-17 17:38:56 -03:00
|
|
|
/* Our mode and status */
|
|
|
|
enum rarch_netplay_connection_mode self_mode;
|
2021-12-25 09:42:22 -03:00
|
|
|
|
2020-08-15 18:53:52 +02:00
|
|
|
/* Are we stalled? */
|
|
|
|
enum rarch_netplay_stall_reason stall;
|
|
|
|
|
2022-06-17 17:38:56 -03:00
|
|
|
/* Keyboard mapping (network and host) */
|
|
|
|
uint16_t mapping_hton[RETROK_LAST];
|
|
|
|
uint16_t mapping_ntoh[NETPLAY_KEY_LAST];
|
2020-08-15 18:53:52 +02:00
|
|
|
|
|
|
|
/* TCP port (only set if serving) */
|
|
|
|
uint16_t tcp_port;
|
Netplay Stuff (#13375)
* Netplay Stuff
## PROTOCOL FALLBACK
In order to support older clients, a protocol fallback system was introduced.
The host no longer sends its header automatically after a TCP connection is established; instead, it waits for the client to send its header before determining which protocol this connection is going to operate on.
Netplay now has two protocols, a low protocol and a high protocol; the low protocol is the minimum protocol it supports, while the high protocol is the highest protocol it can operate on.
To fully support older clients, a hack was necessary: sending the high protocol in the client's unused header salt field, while keeping the protocol field at the low protocol. Without this hack, we would only be able to support older clients if a newer client was the host.
Any future system can make use of this by checking connection->netplay_protocol, which is available for both the client and the host.
## NETPLAY CHAT
Starting with protocol 6, netplay chat is available through the new NETPLAY_CMD_PLAYER_CHAT command.
A limitation of the command code, which causes a disconnection on unknown commands, makes this system impossible on protocol 5.
Protocol 5 connections can neither send nor receive chat, but other netplay operations are unaffected.
Clients send chat as a string to the server, and it is the server's sole responsibility to relay chat messages.
As of now, sending chat uses RetroArch's input menu, while on-screen chat is displayed with a widget overlay, falling back to RetroArch's notifications.
If a new overlay and/or input system is desired, no backwards compatibility changes need to be made.
Only clients in playing mode (as opposed to spectating mode) can send and receive chat.
## SETTINGS SHARING
Some settings are better used when both host and clients share the same configuration.
As of protocol 6, the following settings will be shared from host to clients (without altering a client's configuration file): input latency frames and allow pausing.
## NETPLAY TUNNEL/MITM
With the current MITM system defunct (at least as of 1.9.X), a new system was needed to solve most, if not all, of the problems with the current system.
This new system uses a tunneling approach, which is similar to most VPN and tunneling services around.
Tunnel commands:
RATS[unique id] (RetroArch Tunnel Session) - 16 bytes -> When this command is sent with a zeroed unique id, the tunnel server interprets it as a netplay host wanting to create a new session; in this case, the same command is returned to the host, but now with its unique session id. When a client needs to connect to a host, this command is sent with the host's unique session id, causing the tunnel server to send a RATL command to the host.
RATL[unique id] (RetroArch Tunnel Link) - 16 bytes -> The tunnel server sends this command to the host when a client wants to connect to it. Once the host receives this command, it establishes a new connection to the tunnel server, sending this command together with the client's unique id through this new connection, causing the tunnel server to link this connection to the client's connection.
RATP (RetroArch Tunnel Ping) - 4 bytes -> The tunnel server sends this command to verify that the host to whom the session belongs is still around. The host replies with the same command. A session is closed if the tunnel server cannot verify that the host is alive.
Operations:
Host -> Instead of listening for and accepting connections, it connects to the tunnel server, requests a new session, and then monitors this connection for new linking requests. Once a request is received, it establishes a new connection to the tunnel server for linking with a client. The tunnel server's address and port are obtained by querying the lobby server. The host publishes its session id together with the rest of its info to the lobby server.
Client -> It connects to the tunnel server and then sends the session id of the host it wants to connect to. A host's session id is obtained from the JSON data sent by the lobby server.
Improvements (over the current MITM system):
No longer a risk of TCP port exhaustion; we now use only one port at the tunnel server.
Very little CPU usage; it is now about 95% net I/O bound.
Compatible with any and all future changes to netplay, as it no longer runs any netplay logic at MITM servers.
No longer operates the host in client mode, which was a source of many of the current problems.
Cleaner and more maintainable system and code.
Notable functions:
netplay_mitm_query -> Grabs the tunnel's address and port from the lobby server.
init_tcp_socket -> Handles the creation and operation mode of the TCP socket based on whether it's host, host+MITM or client.
handle_mitm_connection -> Creates and completes linking connections and replies to ping commands (only one of each per call, so as not to affect performance).
## MISC
Ping Limiter: If a client's estimated latency to the server is higher than this value, the connection is dropped just before the netplay handshake finishes.
Ping Counter: A ping counter (similar to the FPS one) can be shown in the bottom-right corner of the screen when you are connected to a host.
LAN Discovery: Refactored and moved to its own "Refresh Netplay LAN List" button.
## FIXES
Many minor fixes to the current netplay implementation are also included.
* Remove NETPLAY_TEST_BUILD
2021-12-19 12:58:01 -03:00
|
|
|
uint16_t ext_tcp_port;
|
2020-08-15 18:53:52 +02:00
|
|
|
|
|
|
|
/* The sharing mode for each device */
|
|
|
|
uint8_t device_share_modes[MAX_INPUT_DEVICES];
|
|
|
|
|
|
|
|
/* Our nickname */
|
|
|
|
char nick[NETPLAY_NICK_LEN];
|
|
|
|
|
2021-11-06 00:27:33 +01:00
|
|
|
/* Set to true if we have a device that most cores
|
|
|
|
* translate to "up/down" actions, typically a keyboard.
|
|
|
|
* We need to keep track of this because with such a device,
|
|
|
|
* we need to "fix" the input state to the frame BEFORE a
|
|
|
|
* state load, then perform the state load, and the
|
|
|
|
* up/down states will proceed as expected. */
|
2020-08-15 18:53:52 +02:00
|
|
|
bool have_updown_device;
|
2015-12-23 13:25:28 -07:00
|
|
|
|
2022-06-17 17:38:56 -03:00
|
|
|
/* Are we the server? */
|
|
|
|
bool is_server;
|
|
|
|
|
|
|
|
bool nat_traversal;
|
|
|
|
|
|
|
|
/* Have we checked whether CRCs are valid at all? */
|
|
|
|
bool crc_validity_checked;
|
|
|
|
|
|
|
|
/* Are they valid? */
|
|
|
|
bool crcs_valid;
|
|
|
|
|
|
|
|
/* Netplay pausing */
|
|
|
|
bool local_paused;
|
|
|
|
bool remote_paused;
|
|
|
|
|
2015-12-23 13:25:28 -07:00
|
|
|
/* Are we replaying old frames? */
|
|
|
|
bool is_replay;
|
2016-09-14 18:03:40 -04:00
|
|
|
|
2022-06-17 17:38:56 -03:00
|
|
|
/* Opposite of stalling, should we be catching up? */
|
|
|
|
bool catch_up;
|
2016-09-14 18:03:40 -04:00
|
|
|
|
2021-11-06 00:27:33 +01:00
|
|
|
/* Force a rewind to other_frame_count/other_ptr.
|
|
|
|
* This is for synchronized events, such as restarting
|
|
|
|
* or savestate loading. */
|
2016-09-14 18:03:40 -04:00
|
|
|
bool force_rewind;
|
2015-12-23 13:25:28 -07:00
|
|
|
|
2017-02-15 14:40:37 -05:00
|
|
|
/* Force a reset */
|
|
|
|
bool force_reset;
|
|
|
|
|
2016-12-12 19:34:50 -05:00
|
|
|
/* Force our state to be sent to all connections */
|
|
|
|
bool force_send_savestate;
|
2016-09-14 23:19:47 -04:00
|
|
|
|
|
|
|
/* Have we requested a savestate as a sync point? */
|
|
|
|
bool savestate_request_outstanding;
|
|
|
|
|
Netplay Stuff (#13375)
* Netplay Stuff
## PROTOCOL FALLBACK
In order to support older clients, a protocol fallback system was introduced.
The host no longer sends its header automatically after a TCP connection is established; instead, it waits for the client to send its header before determining which protocol this connection is going to operate on.
Netplay now has two protocols, a low protocol and a high protocol; the low protocol is the minimum protocol it supports, while the high protocol is the highest protocol it can operate on.
To fully support older clients, a hack was necessary: sending the high protocol in the client's unused header salt field, while keeping the protocol field at the low protocol. Without this hack, we would only be able to support older clients if a newer client was the host.
Any future system can make use of this by checking connection->netplay_protocol, which is available for both the client and the host.
## NETPLAY CHAT
Starting with protocol 6, netplay chat is available through the new NETPLAY_CMD_PLAYER_CHAT command.
A limitation of the command code, which causes a disconnection on unknown commands, makes this system impossible on protocol 5.
Protocol 5 connections can neither send nor receive chat, but other netplay operations are unaffected.
Clients send chat as a string to the server, and it is the server's sole responsibility to relay chat messages.
As of now, sending chat uses RetroArch's input menu, while on-screen chat is displayed with a widget overlay, falling back to RetroArch's notifications.
If a new overlay and/or input system is desired, no backwards compatibility changes need to be made.
Only clients in playing mode (as opposed to spectating mode) can send and receive chat.
## SETTINGS SHARING
Some settings are better used when both host and clients share the same configuration.
As of protocol 6, the following settings will be shared from host to clients (without altering a client's configuration file): input latency frames and allow pausing.
## NETPLAY TUNNEL/MITM
With the current MITM system defunct (at least as of 1.9.X), a new system was needed to solve most, if not all, of the problems with the current system.
This new system uses a tunneling approach, which is similar to most VPN and tunneling services around.
Tunnel commands:
RATS[unique id] (RetroArch Tunnel Session) - 16 bytes -> When this command is sent with a zeroed unique id, the tunnel server interprets it as a netplay host wanting to create a new session; in this case, the same command is returned to the host, but now with its unique session id. When a client needs to connect to a host, this command is sent with the host's unique session id, causing the tunnel server to send a RATL command to the host.
RATL[unique id] (RetroArch Tunnel Link) - 16 bytes -> The tunnel server sends this command to the host when a client wants to connect to it. Once the host receives this command, it establishes a new connection to the tunnel server, sending this command together with the client's unique id through this new connection, causing the tunnel server to link this connection to the client's connection.
RATP (RetroArch Tunnel Ping) - 4 bytes -> The tunnel server sends this command to verify that the host to whom the session belongs is still around. The host replies with the same command. A session is closed if the tunnel server cannot verify that the host is alive.
Operations:
Host -> Instead of listening for and accepting connections, it connects to the tunnel server, requests a new session, and then monitors this connection for new linking requests. Once a request is received, it establishes a new connection to the tunnel server for linking with a client. The tunnel server's address and port are obtained by querying the lobby server. The host publishes its session id together with the rest of its info to the lobby server.
Client -> It connects to the tunnel server and then sends the session id of the host it wants to connect to. A host's session id is obtained from the JSON data sent by the lobby server.
Improvements (over the current MITM system):
No longer a risk of TCP port exhaustion; we now use only one port at the tunnel server.
Very little CPU usage; it is now about 95% net I/O bound.
Compatible with any and all future changes to netplay, as it no longer runs any netplay logic at MITM servers.
No longer operates the host in client mode, which was a source of many of the current problems.
Cleaner and more maintainable system and code.
Notable functions:
netplay_mitm_query -> Grabs the tunnel's address and port from the lobby server.
init_tcp_socket -> Handles the creation and operation mode of the TCP socket based on whether it's host, host+MITM or client.
handle_mitm_connection -> Creates and completes linking connections and replies to ping commands (only one of each per call, so as not to affect performance).
## MISC
Ping Limiter: If a client's estimated latency to the server is higher than this value, the connection is dropped just before the netplay handshake finishes.
Ping Counter: A ping counter (similar to the FPS one) can be shown in the bottom-right corner of the screen when you are connected to a host.
LAN Discovery: Refactored and moved to its own "Refresh Netplay LAN List" button.
## FIXES
Many minor fixes to the current netplay implementation are also included.
* Remove NETPLAY_TEST_BUILD
2021-12-19 12:58:01 -03:00
|
|
|
/* Host settings */
|
|
|
|
bool allow_pausing;
|
2015-12-23 13:25:28 -07:00
|
|
|
};
|
|
|
|
|
2022-06-17 17:38:56 -03:00
|
|
|
void video_frame_net(const void *data,
|
|
|
|
unsigned width, unsigned height, size_t pitch);
|
|
|
|
void audio_sample_net(int16_t left, int16_t right);
|
|
|
|
size_t audio_sample_batch_net(const int16_t *data, size_t frames);
|
|
|
|
int16_t input_state_net(unsigned port, unsigned device,
|
|
|
|
unsigned idx, unsigned id);
|
|
|
|
|
2016-12-15 08:42:03 -05:00
|
|
|
/***************************************************************
|
|
|
|
* NETPLAY-BUF.C
|
|
|
|
**************************************************************/
|
2016-12-03 17:01:19 -05:00
|
|
|
|
|
|
|
/**
|
2016-12-15 08:42:03 -05:00
|
|
|
* netplay_send
|
2016-12-03 17:01:19 -05:00
|
|
|
*
|
2016-12-15 08:42:03 -05:00
|
|
|
* Queue the given data for sending.
|
|
|
|
*/
|
2021-11-06 00:27:33 +01:00
|
|
|
bool netplay_send(struct socket_buffer *sbuf,
|
|
|
|
int sockfd, const void *buf,
|
|
|
|
size_t len);
|
2016-12-15 08:42:03 -05:00
|
|
|
|
|
|
|
/**
|
|
|
|
* netplay_send_flush
|
2016-12-03 17:01:19 -05:00
|
|
|
*
|
2021-11-06 00:27:33 +01:00
|
|
|
* Flush unsent data in the given socket buffer,
|
|
|
|
* blocking to do so if requested.
|
2016-12-15 08:42:03 -05:00
|
|
|
*
|
|
|
|
* Returns false only on socket failures, true otherwise.
|
|
|
|
*/
|
2021-11-06 00:27:33 +01:00
|
|
|
bool netplay_send_flush(struct socket_buffer *sbuf,
|
|
|
|
int sockfd, bool block);
|
2016-12-03 17:01:19 -05:00
|
|
|
|
|
|
|
/**
|
2016-12-15 08:42:03 -05:00
|
|
|
* netplay_recv
|
2016-12-03 17:01:19 -05:00
|
|
|
*
|
2016-12-15 08:42:03 -05:00
|
|
|
* Receive buffered or fresh data.
|
|
|
|
*
|
2022-08-23 22:32:09 -03:00
|
|
|
* Returns the number of bytes read, which may be short, 0, or -1 on error.
|
2016-12-15 08:42:03 -05:00
|
|
|
*/
|
2022-08-23 22:32:09 -03:00
|
|
|
ssize_t netplay_recv(struct socket_buffer *sbuf, int sockfd,
|
|
|
|
void *buf, size_t len);
|
2016-12-03 17:01:19 -05:00
|
|
|
|
|
|
|
/**
|
2016-12-15 08:42:03 -05:00
|
|
|
* netplay_recv_reset
|
2016-12-03 17:01:19 -05:00
|
|
|
*
|
2021-11-06 00:27:33 +01:00
|
|
|
* Reset our recv buffer so that future netplay_recvs
|
|
|
|
* will read the same data again.
|
2016-12-15 08:42:03 -05:00
|
|
|
*/
|
|
|
|
void netplay_recv_reset(struct socket_buffer *sbuf);
|
2016-12-03 17:01:19 -05:00
|
|
|
|
|
|
|
/**
|
2016-12-15 08:42:03 -05:00
|
|
|
* netplay_recv_flush
|
2016-12-03 17:01:19 -05:00
|
|
|
*
|
2021-11-06 00:27:33 +01:00
|
|
|
* Flush our recv buffer, so a future netplay_recv_reset
|
|
|
|
* will reset to this point.
|
2016-12-15 08:42:03 -05:00
|
|
|
*/
|
|
|
|
void netplay_recv_flush(struct socket_buffer *sbuf);
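/* Illustrative sketch only (not part of the API): read a fixed-size
 * buffer with the receive calls above, rewinding on a short read so the
 * next poll sees the same bytes again, and committing the read position
 * once everything is in. The helper name is hypothetical. */
static bool netplay_recv_exact(struct socket_buffer *sbuf,
      int sockfd, void *buf, size_t len)
{
   ssize_t recvd = netplay_recv(sbuf, sockfd, buf, len);

   if (recvd < 0)
      return false;   /* socket failure; the caller should hang up */

   if ((size_t)recvd < len)
   {
      /* Partial read: rewind so the next poll sees the same bytes. */
      netplay_recv_reset(sbuf);
      return false;
   }

   /* Whole buffer read: commit, so a later reset stops here. */
   netplay_recv_flush(sbuf);
   return true;
}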
|
|
|
|
|
|
|
|
/***************************************************************
|
|
|
|
* NETPLAY-DELTA.C
|
|
|
|
**************************************************************/
|
2016-12-03 17:01:19 -05:00
|
|
|
|
|
|
|
/**
|
2016-12-15 08:42:03 -05:00
|
|
|
* netplay_delta_frame_ready
|
2016-12-03 17:01:19 -05:00
|
|
|
*
|
2021-11-06 00:27:33 +01:00
|
|
|
* Prepares, if possible, a delta frame for input, and reports
|
|
|
|
* whether it is ready.
|
2016-12-03 17:01:19 -05:00
|
|
|
*
|
2021-11-06 00:27:33 +01:00
|
|
|
* Returns: True if the delta frame is ready for input at
|
|
|
|
* the given frame, false otherwise.
|
2016-12-15 08:42:03 -05:00
|
|
|
*/
|
2021-11-06 00:27:33 +01:00
|
|
|
bool netplay_delta_frame_ready(netplay_t *netplay,
|
|
|
|
struct delta_frame *delta,
|
|
|
|
uint32_t frame);
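/* Illustrative usage (not part of the API): before storing input for the
 * frame currently being input, a caller would typically check
 *
 *    struct delta_frame *delta = &netplay->buffer[netplay->self_ptr];
 *    if (!netplay_delta_frame_ready(netplay, delta,
 *          netplay->self_frame_count))
 *       { stall until the slot can be prepared }
 *
 * buffer, self_ptr and self_frame_count are struct netplay members
 * declared above; the stall handling itself is an assumption. */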
|
2016-12-03 17:01:19 -05:00
|
|
|
|
2016-12-15 08:42:03 -05:00
|
|
|
/***************************************************************
|
|
|
|
* NETPLAY-FRONTEND.C
|
|
|
|
**************************************************************/
|
2015-12-23 13:25:28 -07:00
|
|
|
|
2016-12-15 08:42:03 -05:00
|
|
|
/**
|
|
|
|
* input_poll_net
|
2022-06-17 17:38:56 -03:00
|
|
|
* @netplay : pointer to netplay object
|
2016-12-15 08:42:03 -05:00
|
|
|
*
|
|
|
|
* Poll the network if necessary.
|
|
|
|
*/
|
2022-06-17 17:38:56 -03:00
|
|
|
void input_poll_net(netplay_t *netplay);
|
2016-09-14 23:19:47 -04:00
|
|
|
|
2016-12-15 08:42:03 -05:00
|
|
|
/***************************************************************
|
|
|
|
* NETPLAY-INIT.C
|
|
|
|
**************************************************************/
|
2016-12-10 20:36:57 -05:00
|
|
|
|
2016-12-15 08:42:03 -05:00
|
|
|
/**
|
|
|
|
* netplay_wait_and_init_serialization
|
|
|
|
*
|
2021-11-06 00:27:33 +01:00
|
|
|
* Try very hard to initialize serialization, simulating
|
|
|
|
* multiple frames if necessary. For quirky cores.
|
2016-12-15 08:42:03 -05:00
|
|
|
*
|
|
|
|
* Returns true if serialization is now ready, false otherwise.
|
|
|
|
*/
|
|
|
|
bool netplay_wait_and_init_serialization(netplay_t *netplay);
|
2016-12-13 20:16:22 -05:00
|
|
|
|
2016-12-15 08:42:03 -05:00
|
|
|
/***************************************************************
|
|
|
|
* NETPLAY-IO.C
|
|
|
|
**************************************************************/
|
|
|
|
|
|
|
|
/**
|
|
|
|
* netplay_send_cur_input
|
|
|
|
*
|
|
|
|
* Send the current input frame to a given connection.
|
|
|
|
*
|
|
|
|
* Returns true if successful, false otherwise.
|
|
|
|
*/
|
|
|
|
bool netplay_send_cur_input(netplay_t *netplay,
|
|
|
|
struct netplay_connection *connection);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* netplay_send_raw_cmd
|
|
|
|
*
|
|
|
|
* Send a raw Netplay command to the given connection.
|
|
|
|
*
|
|
|
|
* Returns true on success, false on failure.
|
|
|
|
*/
|
2016-12-13 20:16:22 -05:00
|
|
|
bool netplay_send_raw_cmd(netplay_t *netplay,
|
|
|
|
struct netplay_connection *connection, uint32_t cmd, const void *data,
|
|
|
|
size_t size);
|
|
|
|
|
2016-12-15 08:42:03 -05:00
|
|
|
/**
|
|
|
|
* netplay_send_raw_cmd_all
|
|
|
|
*
|
2021-11-06 00:27:33 +01:00
|
|
|
* Send a raw Netplay command to all connections,
|
|
|
|
* optionally excluding one
|
2016-12-15 08:42:03 -05:00
|
|
|
* (typically the client that the relevant command came from)
|
|
|
|
*/
|
2016-12-13 20:16:22 -05:00
|
|
|
void netplay_send_raw_cmd_all(netplay_t *netplay,
|
|
|
|
struct netplay_connection *except, uint32_t cmd, const void *data,
|
|
|
|
size_t size);
|
|
|
|
|
2016-12-15 08:42:03 -05:00
|
|
|
/**
|
|
|
|
* netplay_cmd_mode
|
|
|
|
*
|
2021-11-06 00:27:33 +01:00
|
|
|
* Send a mode change request. As a server,
|
|
|
|
* the request is to ourself, and so honored instantly.
|
2016-12-15 08:42:03 -05:00
|
|
|
*/
|
|
|
|
bool netplay_cmd_mode(netplay_t *netplay,
|
|
|
|
enum rarch_netplay_connection_mode mode);
|
2016-12-02 19:49:42 -05:00
|
|
|
|
2016-12-15 08:42:03 -05:00
|
|
|
/***************************************************************
|
|
|
|
* NETPLAY-SYNC.C
|
|
|
|
**************************************************************/
|
2016-09-25 11:05:50 -04:00
|
|
|
|
2016-12-15 08:42:03 -05:00
|
|
|
/**
|
2022-06-17 17:38:56 -03:00
|
|
|
* netplay_load_savestate
|
2016-12-15 08:42:03 -05:00
|
|
|
* @netplay : pointer to netplay object
|
2022-06-17 17:38:56 -03:00
|
|
|
* @serial_info : the savestate being loaded, NULL means
|
|
|
|
* "load it yourself"
|
|
|
|
* @save : Whether to save the provided serial_info
|
|
|
|
* into the frame buffer
|
|
|
|
*
|
|
|
|
* Inform Netplay of a savestate load and send it to the other side
|
|
|
|
**/
|
|
|
|
void netplay_load_savestate(netplay_t *netplay,
|
|
|
|
retro_ctx_serialize_info_t *serial_info, bool save);
|
2015-12-26 08:10:37 +01:00
|
|
|
#endif
|