2015-12-23 13:25:28 -07:00
|
|
|
/* RetroArch - A frontend for libretro.
|
|
|
|
* Copyright (C) 2010-2014 - Hans-Kristian Arntzen
|
2016-01-10 04:06:50 +01:00
|
|
|
* Copyright (C) 2011-2016 - Daniel De Matteis
|
2016-12-02 18:56:29 -05:00
|
|
|
* Copyright (C) 2016 - Gregor Richards
|
2015-12-23 13:25:28 -07:00
|
|
|
*
|
|
|
|
* RetroArch is free software: you can redistribute it and/or modify it under the terms
|
|
|
|
* of the GNU General Public License as published by the Free Software Found-
|
|
|
|
* ation, either version 3 of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* RetroArch is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
|
|
|
|
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
|
|
|
|
* PURPOSE. See the GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License along with RetroArch.
|
|
|
|
* If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef __RARCH_NETPLAY_PRIVATE_H
|
|
|
|
#define __RARCH_NETPLAY_PRIVATE_H
|
2016-05-12 10:20:14 +02:00
|
|
|
|
2015-12-23 13:25:28 -07:00
|
|
|
#include "netplay.h"
|
2016-05-09 20:30:47 +02:00
|
|
|
|
2015-12-23 13:25:28 -07:00
|
|
|
#include <net/net_compat.h>
|
2016-11-29 22:59:46 -05:00
|
|
|
#include <net/net_natt.h>
|
2016-09-13 17:33:26 -04:00
|
|
|
#include <features/features_cpu.h>
|
2016-11-26 16:06:52 -05:00
|
|
|
#include <streams/trans_stream.h>
|
2015-12-23 13:25:28 -07:00
|
|
|
#include <retro_endianness.h>
|
2016-05-09 20:30:47 +02:00
|
|
|
|
2016-09-06 06:11:44 +02:00
|
|
|
#include "../../core.h"
|
2016-05-19 11:46:54 +02:00
|
|
|
#include "../../msg_hash.h"
|
|
|
|
#include "../../verbosity.h"
|
2015-12-23 13:25:28 -07:00
|
|
|
|
|
|
|
#ifdef ANDROID
/* IPv6 support is assumed to be available when building for Android. */
#define HAVE_IPV6
#endif

/* Words of input state per frame in the ring buffer.
 * Allows us to send 128 bits worth of state per frame. */
#define WORDS_PER_FRAME 4

/* Maximum number of simultaneously connected spectators tracked. */
#define MAX_SPECTATORS 16

/* Default TCP port used when the user hasn't configured one. */
#define RARCH_DEFAULT_PORT 55435

/* Default nickname used when the user hasn't set one. */
#define RARCH_DEFAULT_NICK "Anonymous"

/* Wire-protocol version; bumped on incompatible protocol changes so that
 * mismatched clients refuse to connect. */
#define NETPLAY_PROTOCOL_VERSION 3
|
Multitudinous fixes and updates to Netplay. Had to be one commit since
they're mostly related:
(1) Renamed frame_count to self_frame_count to be consistent with all
other names.
(2) Previously, it was possible to overwrite data in the ring buffer
that hadn't yet been used. Now that's not possible, but that just
changes one breakage for another: It's now possible to miss the NEW
data. The final resolution for this will probably be requesting stalls.
This is accomplished simply by storing frame numbers in the ring buffer
and checking them against the 'other' head.
(3) In TCP packets, separated cmd_size from cmd. It was beyond pointless
for these to be combined, and restricted cmd_size to 16 bits, which
will probably fail when/if state loading is supported.
(4) Readahead is now allowed. In the past, if the peer got ahead of us,
we would simply ignore their data. Thus, if they got too far ahead of
us, we'd stop reading their data altogether. Fabulous. Now, we're happy
to read future input.
(5) If the peer gets too far ahead of us (currently an unconfigurable 10
frames), fast forward to catch up. This should prevent desync due to
clock drift or stutter.
(6) Used frame_count in a few places where ptr was used. Doing a
comparison of pointers on a ring buffer is a far more dangerous way to
assure we're done with a task than simply using the count, since the
ring buffer is... well, a ring.
(7) Renamed tmp_{ptr,frame_count} to replay_{ptr,frame_count} for
clarity.
(8) Slightly changed the protocol version hash, just to assure that
other clients wouldn't think they were compatible with this one.
(9) There was an off-by-one error which, under some circumstances, could
allow the replay engine to run a complete round through the ring buffer,
replaying stale data. Fixed.
2016-09-11 22:01:47 -04:00
|
|
|
|
2015-12-23 13:25:28 -07:00
|
|
|
/* Ring-buffer index arithmetic over netplay->buffer_size entries.
 * A variable named `netplay` must be in scope at the expansion site.
 * Arguments are fully parenthesized so expressions like NEXT_PTR(a ? b : c)
 * expand correctly (macro-hygiene fix: the old NEXT_PTR used bare `x + 1`). */
#define PREV_PTR(x) ((x) == 0 ? netplay->buffer_size - 1 : (x) - 1)
#define NEXT_PTR(x) (((x) + 1) % netplay->buffer_size)
|
|
|
|
|
2016-09-30 13:31:58 -04:00
|
|
|
/* Quirks mandated by how particular cores save states. This is distilled from
 * the larger set of quirks that the quirks environment can communicate. */
#define NETPLAY_QUIRK_NO_SAVESTATES     (1<<0)
#define NETPLAY_QUIRK_NO_TRANSMISSION   (1<<1)
#define NETPLAY_QUIRK_INITIALIZATION    (1<<2)
#define NETPLAY_QUIRK_ENDIAN_DEPENDENT  (1<<3)
#define NETPLAY_QUIRK_PLATFORM_DEPENDENT (1<<4)

/* Mapping of serialization quirks to netplay quirks. */

/* The set of RETRO_SERIALIZATION quirks netplay knows how to handle at all. */
#define NETPLAY_QUIRK_MAP_UNDERSTOOD \
   (RETRO_SERIALIZATION_QUIRK_INCOMPLETE \
   |RETRO_SERIALIZATION_QUIRK_MUST_INITIALIZE \
   |RETRO_SERIALIZATION_QUIRK_SINGLE_SESSION \
   |RETRO_SERIALIZATION_QUIRK_ENDIAN_DEPENDENT \
   |RETRO_SERIALIZATION_QUIRK_PLATFORM_DEPENDENT)

/* Serialization quirks that map to NETPLAY_QUIRK_NO_SAVESTATES. */
#define NETPLAY_QUIRK_MAP_NO_SAVESTATES \
   (RETRO_SERIALIZATION_QUIRK_INCOMPLETE)

/* Serialization quirks that map to NETPLAY_QUIRK_NO_TRANSMISSION. */
#define NETPLAY_QUIRK_MAP_NO_TRANSMISSION \
   (RETRO_SERIALIZATION_QUIRK_SINGLE_SESSION)

/* Serialization quirks that map to NETPLAY_QUIRK_INITIALIZATION. */
#define NETPLAY_QUIRK_MAP_INITIALIZATION \
   (RETRO_SERIALIZATION_QUIRK_MUST_INITIALIZE)

/* Serialization quirks that map to NETPLAY_QUIRK_ENDIAN_DEPENDENT. */
#define NETPLAY_QUIRK_MAP_ENDIAN_DEPENDENT \
   (RETRO_SERIALIZATION_QUIRK_ENDIAN_DEPENDENT)

/* Serialization quirks that map to NETPLAY_QUIRK_PLATFORM_DEPENDENT. */
#define NETPLAY_QUIRK_MAP_PLATFORM_DEPENDENT \
   (RETRO_SERIALIZATION_QUIRK_PLATFORM_DEPENDENT)
|
2016-09-30 13:31:58 -04:00
|
|
|
|
2016-11-25 11:03:12 -05:00
|
|
|
/* Compression protocols supported (bitmask). */
#define NETPLAY_COMPRESSION_ZLIB (1<<0)

/* Use #ifdef rather than #if: the build system may define HAVE_ZLIB with no
 * value, which would make `#if HAVE_ZLIB` a hard preprocessor error; the
 * intent is simply "is zlib available". */
#ifdef HAVE_ZLIB
#define NETPLAY_COMPRESSION_SUPPORTED NETPLAY_COMPRESSION_ZLIB
#else
#define NETPLAY_COMPRESSION_SUPPORTED 0
#endif
|
|
|
|
|
2015-12-23 13:25:28 -07:00
|
|
|
/* One slot of the netplay ring buffer: everything known about a single
 * emulated frame (serialized state, local/remote/predicted input). */
struct delta_frame
{
   /* Whether this slot has been used at all; needed because frame == 0 is
    * otherwise indistinguishable from an empty slot. */
   bool used;

   /* Frame number this delta corresponds to. */
   uint32_t frame;

   /* The serialized state of the core at this frame, before input */
   void *state;

   /* The CRC-32 of the serialized state if we've calculated it, else 0 */
   uint32_t crc;

   /* Input words for this frame: real (received) remote input, simulated
    * (predicted) remote input, and our own local input.
    * NOTE(review): per-word roles inferred from the names -- confirm against
    * the implementation. */
   uint32_t real_input_state[WORDS_PER_FRAME - 1];
   uint32_t simulated_input_state[WORDS_PER_FRAME - 1];
   uint32_t self_state[WORDS_PER_FRAME - 1];

   /* Have we read local input? */
   bool have_local;

   /* Have we read the real remote input? */
   bool have_remote;

   /* Is the current state as of self_frame_count using the real remote data? */
   bool used_real;
};
|
|
|
|
|
2016-09-25 11:05:50 -04:00
|
|
|
/* Byte ring buffer used for buffered, possibly non-blocking socket I/O
 * (see netplay_send/netplay_recv below). */
struct socket_buffer
{
   unsigned char *data; /* backing storage */
   size_t bufsz;        /* capacity of data, in bytes */
   /* Ring cursors. NOTE(review): the precise roles of start/end vs. read
    * (consumed vs. buffered boundaries) are not visible in this header --
    * confirm against the socket-buffer implementation. */
   size_t start, end;
   size_t read;
};
|
|
|
|
|
2015-12-23 13:25:28 -07:00
|
|
|
/* Per-mode virtual method table; net (interactive) and spectate modes each
 * supply an implementation (see netplay_get_cbs_net/netplay_get_cbs_spectate). */
struct netplay_callbacks {
   /* Called before each core frame; bool return presumably signals whether
    * the frame may proceed. NOTE(review): confirm return semantics against
    * the implementations. */
   bool (*pre_frame) (netplay_t *netplay);
   /* Called after each core frame. */
   void (*post_frame)(netplay_t *netplay);
   /* NOTE(review): purpose of `frames` not visible here -- confirm. */
   bool (*info_cb) (netplay_t *netplay, unsigned frames);
};
|
|
|
|
|
2016-09-12 09:13:26 -04:00
|
|
|
/* Reasons netplay may be stalling the core instead of running frames. */
enum rarch_netplay_stall_reasons
{
   /* Not stalled. */
   RARCH_NETPLAY_STALL_NONE = 0,
   /* We have run too far ahead of the remote input we've received. */
   RARCH_NETPLAY_STALL_RUNNING_FAST,
   /* The connection appears to be gone. */
   RARCH_NETPLAY_STALL_NO_CONNECTION
};
|
|
|
|
|
2015-12-23 13:25:28 -07:00
|
|
|
/* Full state of a netplay session. */
struct netplay
{
   /* Our nickname and the peer's nickname. */
   char nick[32];
   char other_nick[32];
   /* Address of the peer. */
   struct sockaddr_storage other_addr;

   /* Saved libretro callbacks. */
   struct retro_callbacks cbs;

   /* TCP connection for state sending, etc. Also used for commands */
   int fd;

   /* TCP port (if serving) */
   uint16_t tcp_port;

   /* NAT traversal info (if NAT traversal is used and serving) */
   bool nat_traversal;
   struct natt_status nat_traversal_state;

   /* Which port is governed by netplay (other user)? */
   unsigned port;

   /* Do we have an established connection? */
   bool has_connection;

   /* Ring buffer of delta frames, and its length in frames. */
   struct delta_frame *buffer;
   size_t buffer_size;

   /* Compression transcoder */
   const struct trans_stream_backend *compression_backend;
   void *compression_stream;
   const struct trans_stream_backend *decompression_backend;
   void *decompression_stream;

   /* A buffer into which to compress frames for transfer */
   uint8_t *zbuffer;
   size_t zbuffer_size;

   /* Pointer where we are now. */
   size_t self_ptr;
   /* Points to the last reliable state that self ever had. */
   size_t other_ptr;
   /* Pointer to where we are reading.
    * Generally, other_ptr <= read_ptr <= self_ptr. */
   size_t read_ptr;
   /* A pointer used temporarily for replay. */
   size_t replay_ptr;

   /* Size of one serialized core state, in bytes. */
   size_t state_size;

   /* Are we replaying old frames? */
   bool is_replay;

   /* We don't want to poll several times on a frame. */
   bool can_poll;

   /* Force a rewind to other_frame_count/other_ptr. This is for synchronized
    * events, such as player flipping or savestate loading. */
   bool force_rewind;

   /* Quirks in the savestate implementation */
   uint64_t quirks;

   /* Force our state to be sent to the other side. Used when they request a
    * savestate, to send at the next pre-frame. */
   bool force_send_savestate;

   /* Have we requested a savestate as a sync point? */
   bool savestate_request_outstanding;

   /* A buffer for outgoing input packets. */
   uint32_t input_packet_buffer[2 + WORDS_PER_FRAME];

   /* And buffers for sending and receiving our actual data */
   struct socket_buffer send_packet_buffer, recv_packet_buffer;

   /* All of our frame counts. The *_frame_count fields parallel the *_ptr
    * ring-buffer indices above (self/read/other/replay). */
   uint32_t self_frame_count;
   uint32_t read_frame_count;
   uint32_t other_frame_count;
   uint32_t replay_frame_count;

   /* And socket info */
   struct addrinfo *addr;
   struct sockaddr_storage their_addr;
   bool has_client_addr;

   /* NOTE(review): timeout counter -- exact semantics not visible in this
    * header; confirm against the implementation. */
   unsigned timeout_cnt;

   /* Spectating. */
   struct {
      bool enabled;
      int fds[MAX_SPECTATORS];
      uint32_t frames[MAX_SPECTATORS];
      uint16_t *input;
      size_t input_ptr;
      size_t input_sz;
   } spectate;

   /* Are we acting as the server? */
   bool is_server;

   /* User flipping:
    * Flipping state. If frame >= flip_frame, we apply the flip.
    * If not, we apply the opposite, effectively creating a trigger point. */
   bool flip;
   uint32_t flip_frame;

   /* Netplay pausing */
   bool local_paused;
   bool remote_paused;

   /* And stalling */
   uint32_t delay_frames;
   int stall; /* holds an enum rarch_netplay_stall_reasons value */
   retro_time_t stall_time;

   /* Frequency with which to check CRCs */
   uint32_t check_frames;

   /* Mode-specific callback table (net or spectate). */
   struct netplay_callbacks* net_cbs;
};
|
|
|
|
|
|
|
|
/* NOTE(review): the one-line descriptions below are inferred from names and
 * usage within this header; confirm details against the implementations. */

/* Callback table for normal (interactive) netplay. */
struct netplay_callbacks* netplay_get_cbs_net(void);

/* Callback table for spectator-mode netplay. */
struct netplay_callbacks* netplay_get_cbs_spectate(void);

/* Normally called at init time, unless the INITIALIZATION quirk is set */
bool netplay_init_serialization(netplay_t *netplay);

/* Force serialization to be ready by fast-forwarding the core */
bool netplay_wait_and_init_serialization(netplay_t *netplay);

/* Fill in simulated (predicted) remote input at ring slot sim_ptr; resim
 * presumably requests re-simulation of already-simulated data. */
void netplay_simulate_input(netplay_t *netplay, uint32_t sim_ptr, bool resim);

/* Log a connection from their_addr occupying the given slot. */
void netplay_log_connection(const struct sockaddr_storage *their_addr,
      unsigned slot, const char *nick);

/* Receive the peer's nickname over fd. */
bool netplay_get_nickname(netplay_t *netplay, int fd);

/* Send our nickname over fd. */
bool netplay_send_nickname(netplay_t *netplay, int fd);

/* Perform the initial netplay handshake. */
bool netplay_handshake(netplay_t *netplay);

/* Implementation magic value, used to check client compatibility. */
uint32_t netplay_impl_magic(void);

bool netplay_is_server(netplay_t* netplay);

bool netplay_is_spectate(netplay_t* netplay);

/* Is this delta-frame slot ready to be (re)used for `frame`? */
bool netplay_delta_frame_ready(netplay_t *netplay, struct delta_frame *delta, uint32_t frame);

/* CRC of the delta's serialized state (see delta_frame.crc). */
uint32_t netplay_delta_frame_crc(netplay_t *netplay, struct delta_frame *delta);

/* Send a CRC command for the given delta frame. */
bool netplay_cmd_crc(netplay_t *netplay, struct delta_frame *delta);

/* Request a savestate from the peer as a sync point
 * (see savestate_request_outstanding). */
bool netplay_cmd_request_savestate(netplay_t *netplay);

/* DISCOVERY: */

/* Serve LAN auto-discovery requests. */
bool netplay_lan_ad_server(netplay_t *netplay);

/* Socket-buffer helpers (see struct socket_buffer): */

bool netplay_init_socket_buffer(struct socket_buffer *sbuf, size_t size);

void netplay_deinit_socket_buffer(struct socket_buffer *sbuf);

void netplay_clear_socket_buffer(struct socket_buffer *sbuf);

/* Queue len bytes from buf for sending over sockfd. */
bool netplay_send(struct socket_buffer *sbuf, int sockfd, const void *buf, size_t len);

/* Flush queued data; block controls whether to wait for completion. */
bool netplay_send_flush(struct socket_buffer *sbuf, int sockfd, bool block);

/* Buffered receive of up to len bytes; returns a byte count or negative on
 * error, following the read(2) convention -- confirm. */
ssize_t netplay_recv(struct socket_buffer *sbuf, int sockfd, void *buf, size_t len, bool block);

void netplay_recv_reset(struct socket_buffer *sbuf);

void netplay_recv_flush(struct socket_buffer *sbuf);
|
|
|
|
|
2015-12-26 08:10:37 +01:00
|
|
|
#endif
|