Understand the latency between nodes across the Realtime cluster.
-
- <%= for {_pair, p} <- @pings do %>
-
-
From: <%= p.from_region %> - <%= p.from_node %>
-
To: <%= p.region %> - <%= p.node %>
-
<%= p.latency %> ms
-
<%= p.timestamp %>
-
- <% end %>
+
+
+
From: <%= p.payload.from_region %> - <%= p.payload.from_node %>
+
To: <%= p.payload.region %> - <%= p.payload.node %>
+
<%= p.payload.latency %> ms
+
<%= p.payload.timestamp %>
+
diff --git a/lib/realtime_web/plugs/assign_tenant.ex b/lib/realtime_web/plugs/assign_tenant.ex
index 69b52e8ab..b60d3e28a 100644
--- a/lib/realtime_web/plugs/assign_tenant.ex
+++ b/lib/realtime_web/plugs/assign_tenant.ex
@@ -20,7 +20,7 @@ defmodule RealtimeWeb.Plugs.AssignTenant do
def call(%Plug.Conn{host: host} = conn, _opts) do
with {:ok, external_id} <- Database.get_external_id(host),
- %Tenant{} = tenant <- Api.get_tenant_by_external_id(external_id) do
+ %Tenant{} = tenant <- Api.get_tenant_by_external_id(external_id, use_replica?: true) do
Logger.metadata(external_id: external_id, project: external_id)
OpenTelemetry.Tracer.set_attributes(external_id: external_id)
diff --git a/lib/realtime_web/plugs/auth_tenant.ex b/lib/realtime_web/plugs/auth_tenant.ex
index 11bf2e0bc..23c0581a8 100644
--- a/lib/realtime_web/plugs/auth_tenant.ex
+++ b/lib/realtime_web/plugs/auth_tenant.ex
@@ -42,6 +42,9 @@ defmodule RealtimeWeb.AuthTenant do
[] ->
nil
+ [""] ->
+ nil
+
[value | _] ->
[bearer, token] = value |> String.split(" ")
bearer = String.downcase(bearer)
diff --git a/lib/realtime_web/router.ex b/lib/realtime_web/router.ex
index 1e368f6d2..77aded263 100644
--- a/lib/realtime_web/router.ex
+++ b/lib/realtime_web/router.ex
@@ -76,6 +76,7 @@ defmodule RealtimeWeb.Router do
pipe_through(:metrics)
get("/", MetricsController, :index)
+ get("/:region", MetricsController, :region)
end
scope "/api" do
diff --git a/lib/realtime_web/socket/user_broadcast.ex b/lib/realtime_web/socket/user_broadcast.ex
new file mode 100644
index 000000000..7caba33ce
--- /dev/null
+++ b/lib/realtime_web/socket/user_broadcast.ex
@@ -0,0 +1,39 @@
+defmodule RealtimeWeb.Socket.UserBroadcast do
+ @moduledoc """
+ Defines a message sent from pubsub to channels and vice-versa.
+
+ The message format requires the following keys:
+
+ * `:topic` - The string topic or topic:subtopic pair namespace, for example "messages", "messages:123"
+  * `:user_event` - The string user event name, for example "my-event"
+  * `:user_payload_encoding` - :json or :binary
+ * `:user_payload` - The actual message payload
+
+ Optionally metadata which is a map to be JSON encoded
+ """
+
+ alias Phoenix.Socket.Broadcast
+
+ @type t :: %__MODULE__{}
+ defstruct topic: nil, user_event: nil, user_payload: nil, user_payload_encoding: nil, metadata: nil
+
+ @spec convert_to_json_broadcast(t) :: {:ok, Broadcast.t()} | {:error, String.t()}
+ def convert_to_json_broadcast(%__MODULE__{user_payload_encoding: :json} = user_broadcast) do
+ payload = %{
+ "event" => user_broadcast.user_event,
+ "payload" => Jason.Fragment.new(user_broadcast.user_payload),
+ "type" => "broadcast"
+ }
+
+ payload =
+ if user_broadcast.metadata do
+ Map.put(payload, "meta", user_broadcast.metadata)
+ else
+ payload
+ end
+
+ {:ok, %Broadcast{event: "broadcast", payload: payload, topic: user_broadcast.topic}}
+ end
+
+ def convert_to_json_broadcast(%__MODULE__{}), do: {:error, "User payload encoding is not JSON"}
+end
diff --git a/lib/realtime_web/socket/v2_serializer.ex b/lib/realtime_web/socket/v2_serializer.ex
new file mode 100644
index 000000000..ff50dab5d
--- /dev/null
+++ b/lib/realtime_web/socket/v2_serializer.ex
@@ -0,0 +1,232 @@
+defmodule RealtimeWeb.Socket.V2Serializer do
+ @moduledoc """
+ Custom serializer that is a superset of Phoenix's V2 JSONSerializer
+ that handles user broadcast and user broadcast push
+ """
+
+ @behaviour Phoenix.Socket.Serializer
+
+ @push 0
+ @reply 1
+ @broadcast 2
+ @user_broadcast_push 3
+ @user_broadcast 4
+
+ alias Phoenix.Socket.{Message, Reply, Broadcast}
+ alias RealtimeWeb.Socket.UserBroadcast
+
+ @impl true
+ def fastlane!(%UserBroadcast{} = msg) do
+ metadata =
+ if msg.metadata do
+ Phoenix.json_library().encode!(msg.metadata)
+ else
+ msg.metadata
+ end
+
+ topic_size = byte_size!(msg.topic, :topic, 255)
+ user_event_size = byte_size!(msg.user_event, :user_event, 255)
+ metadata_size = byte_size!(metadata, :metadata, 255)
+ user_payload_encoding = if msg.user_payload_encoding == :json, do: 1, else: 0
+
+ bin = <<
+ @user_broadcast::size(8),
+ topic_size::size(8),
+ user_event_size::size(8),
+ metadata_size::size(8),
+ user_payload_encoding::size(8),
+ msg.topic::binary-size(topic_size),
+ msg.user_event::binary-size(user_event_size),
+ metadata || <<>>::binary-size(metadata_size),
+ msg.user_payload::binary
+ >>
+
+ {:socket_push, :binary, bin}
+ end
+
+ def fastlane!(%Broadcast{payload: {:binary, data}} = msg) do
+ topic_size = byte_size!(msg.topic, :topic, 255)
+ event_size = byte_size!(msg.event, :event, 255)
+
+ bin = <<
+ @broadcast::size(8),
+ topic_size::size(8),
+ event_size::size(8),
+ msg.topic::binary-size(topic_size),
+ msg.event::binary-size(event_size),
+ data::binary
+ >>
+
+ {:socket_push, :binary, bin}
+ end
+
+ def fastlane!(%Broadcast{payload: %{}} = msg) do
+ data = Phoenix.json_library().encode_to_iodata!([nil, nil, msg.topic, msg.event, msg.payload])
+ {:socket_push, :text, data}
+ end
+
+ def fastlane!(%Broadcast{payload: invalid}) do
+ raise ArgumentError, "expected broadcasted payload to be a map, got: #{inspect(invalid)}"
+ end
+
+ @impl true
+ def encode!(%Reply{payload: {:binary, data}} = reply) do
+ status = to_string(reply.status)
+ join_ref = to_string(reply.join_ref)
+ ref = to_string(reply.ref)
+ join_ref_size = byte_size!(join_ref, :join_ref, 255)
+ ref_size = byte_size!(ref, :ref, 255)
+ topic_size = byte_size!(reply.topic, :topic, 255)
+ status_size = byte_size!(status, :status, 255)
+
+ bin = <<
+ @reply::size(8),
+ join_ref_size::size(8),
+ ref_size::size(8),
+ topic_size::size(8),
+ status_size::size(8),
+ join_ref::binary-size(join_ref_size),
+ ref::binary-size(ref_size),
+ reply.topic::binary-size(topic_size),
+ status::binary-size(status_size),
+ data::binary
+ >>
+
+ {:socket_push, :binary, bin}
+ end
+
+ def encode!(%Reply{} = reply) do
+ data = [
+ reply.join_ref,
+ reply.ref,
+ reply.topic,
+ "phx_reply",
+ %{status: reply.status, response: reply.payload}
+ ]
+
+ {:socket_push, :text, Phoenix.json_library().encode_to_iodata!(data)}
+ end
+
+ def encode!(%Message{payload: {:binary, data}} = msg) do
+ join_ref = to_string(msg.join_ref)
+ join_ref_size = byte_size!(join_ref, :join_ref, 255)
+ topic_size = byte_size!(msg.topic, :topic, 255)
+ event_size = byte_size!(msg.event, :event, 255)
+
+ bin = <<
+ @push::size(8),
+ join_ref_size::size(8),
+ topic_size::size(8),
+ event_size::size(8),
+ join_ref::binary-size(join_ref_size),
+ msg.topic::binary-size(topic_size),
+ msg.event::binary-size(event_size),
+ data::binary
+ >>
+
+ {:socket_push, :binary, bin}
+ end
+
+ def encode!(%Message{payload: %{}} = msg) do
+ data = [msg.join_ref, msg.ref, msg.topic, msg.event, msg.payload]
+ {:socket_push, :text, Phoenix.json_library().encode_to_iodata!(data)}
+ end
+
+ def encode!(%Message{payload: invalid}) do
+ raise ArgumentError, "expected payload to be a map, got: #{inspect(invalid)}"
+ end
+
+ @impl true
+ def decode!(raw_message, opts) do
+ case Keyword.fetch(opts, :opcode) do
+ {:ok, :text} -> decode_text(raw_message)
+ {:ok, :binary} -> decode_binary(raw_message)
+ end
+ end
+
+ defp decode_text(raw_message) do
+ [join_ref, ref, topic, event, payload | _] = Phoenix.json_library().decode!(raw_message)
+
+ %Message{
+ topic: topic,
+ event: event,
+ payload: payload,
+ ref: ref,
+ join_ref: join_ref
+ }
+ end
+
+ defp decode_binary(<<
+ @push::size(8),
+ join_ref_size::size(8),
+ ref_size::size(8),
+ topic_size::size(8),
+ event_size::size(8),
+ join_ref::binary-size(join_ref_size),
+ ref::binary-size(ref_size),
+ topic::binary-size(topic_size),
+ event::binary-size(event_size),
+ data::binary
+ >>) do
+ %Message{
+ topic: topic,
+ event: event,
+ payload: {:binary, data},
+ ref: ref,
+ join_ref: join_ref
+ }
+ end
+
+ defp decode_binary(<<
+ @user_broadcast_push::size(8),
+ join_ref_size::size(8),
+ ref_size::size(8),
+ topic_size::size(8),
+ user_event_size::size(8),
+ metadata_size::size(8),
+ user_payload_encoding::size(8),
+ join_ref::binary-size(join_ref_size),
+ ref::binary-size(ref_size),
+ topic::binary-size(topic_size),
+ user_event::binary-size(user_event_size),
+ metadata::binary-size(metadata_size),
+ user_payload::binary
+ >>) do
+ user_payload_encoding = if user_payload_encoding == 0, do: :binary, else: :json
+
+ metadata =
+ if metadata_size > 0 do
+ Phoenix.json_library().decode!(metadata)
+ else
+ %{}
+ end
+
+ # Encoding as Message because that's how Phoenix Socket and Channel.Server expects things to show up
+ # Here we abuse the payload field to carry a tuple of (user_event, user payload encoding, user payload, metadata)
+ %Message{
+ topic: topic,
+ event: "broadcast",
+ payload: {user_event, user_payload_encoding, user_payload, metadata},
+ ref: ref,
+ join_ref: join_ref
+ }
+ end
+
+ defp byte_size!(nil, _kind, _max), do: 0
+
+ defp byte_size!(bin, kind, max) do
+ case byte_size(bin) do
+ size when size <= max ->
+ size
+
+ oversized ->
+ raise ArgumentError, """
+ unable to convert #{kind} to binary.
+
+ #{inspect(bin)}
+
+ must be less than or equal to #{max} bytes, but is #{oversized} bytes.
+ """
+ end
+ end
+end
diff --git a/lib/realtime_web/tenant_broadcaster.ex b/lib/realtime_web/tenant_broadcaster.ex
index ee8646614..b1b878b5d 100644
--- a/lib/realtime_web/tenant_broadcaster.ex
+++ b/lib/realtime_web/tenant_broadcaster.ex
@@ -5,11 +5,49 @@ defmodule RealtimeWeb.TenantBroadcaster do
alias Phoenix.PubSub
- @spec pubsub_broadcast(tenant_id :: String.t(), PubSub.topic(), PubSub.message(), PubSub.dispatcher()) :: :ok
- def pubsub_broadcast(tenant_id, topic, message, dispatcher) do
- collect_payload_size(tenant_id, message)
+ @type message_type :: :broadcast | :presence | :postgres_changes
- Realtime.GenRpc.multicast(PubSub, :local_broadcast, [Realtime.PubSub, topic, message, dispatcher], key: topic)
+ @spec pubsub_direct_broadcast(
+ node :: node(),
+ tenant_id :: String.t(),
+ PubSub.topic(),
+ PubSub.message(),
+ PubSub.dispatcher(),
+ message_type
+ ) ::
+ :ok
+ def pubsub_direct_broadcast(node, tenant_id, topic, message, dispatcher, message_type) do
+ collect_payload_size(tenant_id, message, message_type)
+
+ do_direct_broadcast(node, topic, message, dispatcher)
+
+ :ok
+ end
+
+ # Remote
+ defp do_direct_broadcast(node, topic, message, dispatcher) when node != node() do
+ if pubsub_adapter() == :gen_rpc do
+ PubSub.direct_broadcast(node, Realtime.PubSub, topic, message, dispatcher)
+ else
+ Realtime.GenRpc.cast(node, PubSub, :local_broadcast, [Realtime.PubSub, topic, message, dispatcher], key: topic)
+ end
+ end
+
+ # Local
+ defp do_direct_broadcast(_node, topic, message, dispatcher) do
+ PubSub.local_broadcast(Realtime.PubSub, topic, message, dispatcher)
+ end
+
+ @spec pubsub_broadcast(tenant_id :: String.t(), PubSub.topic(), PubSub.message(), PubSub.dispatcher(), message_type) ::
+ :ok
+ def pubsub_broadcast(tenant_id, topic, message, dispatcher, message_type) do
+ collect_payload_size(tenant_id, message, message_type)
+
+ if pubsub_adapter() == :gen_rpc do
+ PubSub.broadcast(Realtime.PubSub, topic, message, dispatcher)
+ else
+ Realtime.GenRpc.multicast(PubSub, :local_broadcast, [Realtime.PubSub, topic, message, dispatcher], key: topic)
+ end
:ok
end
@@ -19,30 +57,41 @@ defmodule RealtimeWeb.TenantBroadcaster do
from :: pid,
PubSub.topic(),
PubSub.message(),
- PubSub.dispatcher()
+ PubSub.dispatcher(),
+ message_type
) ::
:ok
- def pubsub_broadcast_from(tenant_id, from, topic, message, dispatcher) do
- collect_payload_size(tenant_id, message)
+ def pubsub_broadcast_from(tenant_id, from, topic, message, dispatcher, message_type) do
+ collect_payload_size(tenant_id, message, message_type)
- Realtime.GenRpc.multicast(
- PubSub,
- :local_broadcast_from,
- [Realtime.PubSub, from, topic, message, dispatcher],
- key: topic
- )
+ if pubsub_adapter() == :gen_rpc do
+ PubSub.broadcast_from(Realtime.PubSub, from, topic, message, dispatcher)
+ else
+ Realtime.GenRpc.multicast(
+ PubSub,
+ :local_broadcast_from,
+ [Realtime.PubSub, from, topic, message, dispatcher],
+ key: topic
+ )
+ end
:ok
end
@payload_size_event [:realtime, :tenants, :payload, :size]
- defp collect_payload_size(tenant_id, payload) when is_struct(payload) do
+ @spec collect_payload_size(tenant_id :: String.t(), payload :: term, message_type :: message_type) :: :ok
+ def collect_payload_size(tenant_id, payload, message_type) when is_struct(payload) do
# Extracting from struct so the __struct__ bit is not calculated as part of the payload
- collect_payload_size(tenant_id, Map.from_struct(payload))
+ collect_payload_size(tenant_id, Map.from_struct(payload), message_type)
end
- defp collect_payload_size(tenant_id, payload) do
- :telemetry.execute(@payload_size_event, %{size: :erlang.external_size(payload)}, %{tenant: tenant_id})
+ def collect_payload_size(tenant_id, payload, message_type) do
+ :telemetry.execute(@payload_size_event, %{size: :erlang.external_size(payload)}, %{
+ tenant: tenant_id,
+ message_type: message_type
+ })
end
+
+ defp pubsub_adapter, do: Application.fetch_env!(:realtime, :pubsub_adapter)
end
diff --git a/mix.exs b/mix.exs
index d0f8a267b..9fb7c80a3 100644
--- a/mix.exs
+++ b/mix.exs
@@ -4,8 +4,8 @@ defmodule Realtime.MixProject do
def project do
[
app: :realtime,
- version: "2.46.2",
- elixir: "~> 1.17.3",
+ version: "2.70.0",
+ elixir: "~> 1.18",
elixirc_paths: elixirc_paths(Mix.env()),
start_permanent: Mix.env() == :prod,
aliases: aliases(),
@@ -53,7 +53,7 @@ defmodule Realtime.MixProject do
# Type `mix help deps` for examples and options.
defp deps do
[
- {:phoenix, "~> 1.7.0"},
+ {:phoenix, override: true, github: "supabase/phoenix", branch: "feat/presence-custom-dispatcher-1.7.19"},
{:phoenix_ecto, "~> 4.4.0"},
{:ecto_sql, "~> 3.11"},
{:ecto_psql_extras, "~> 0.8"},
@@ -65,7 +65,7 @@ defmodule Realtime.MixProject do
{:phoenix_view, "~> 2.0"},
{:esbuild, "~> 0.4", runtime: Mix.env() == :dev},
{:tailwind, "~> 0.1", runtime: Mix.env() == :dev},
- {:telemetry_metrics, "~> 0.6"},
+ {:telemetry_metrics, "~> 1.0"},
{:telemetry_poller, "~> 1.0"},
{:gettext, "~> 0.19"},
{:jason, "~> 1.3"},
@@ -73,13 +73,15 @@ defmodule Realtime.MixProject do
{:libcluster, "~> 3.3"},
{:libcluster_postgres, "~> 0.2"},
{:uuid, "~> 1.1"},
- {:prom_ex, "~> 1.8"},
+ {:prom_ex, "~> 1.10"},
+ {:peep, git: "https://github.com/supabase/peep.git", branch: "feat/partitions-ets", override: true},
{:joken, "~> 2.5.0"},
{:ex_json_schema, "~> 0.7"},
{:recon, "~> 2.5"},
{:mint, "~> 1.4"},
{:logflare_logger_backend, "~> 0.11"},
{:syn, "~> 3.3"},
+ {:beacon, path: "./beacon"},
{:cachex, "~> 4.0"},
{:open_api_spex, "~> 3.16"},
{:corsica, "~> 2.0"},
@@ -90,7 +92,7 @@ defmodule Realtime.MixProject do
{:opentelemetry_phoenix, "~> 2.0"},
{:opentelemetry_cowboy, "~> 1.0"},
{:opentelemetry_ecto, "~> 1.2"},
- {:gen_rpc, git: "https://github.com/supabase/gen_rpc.git", ref: "d161cf263c661a534eaabf80aac7a34484dac772"},
+ {:gen_rpc, git: "https://github.com/supabase/gen_rpc.git", ref: "5382a0f2689a4cb8838873a2173928281dbe5002"},
{:mimic, "~> 1.0", only: :test},
{:floki, ">= 0.30.0", only: :test},
{:mint_web_socket, "~> 1.0", only: :test},
@@ -121,7 +123,6 @@ defmodule Realtime.MixProject do
test: [
"cmd epmd -daemon",
"ecto.create --quiet",
- "run priv/repo/seeds_before_migration.exs",
"ecto.migrate --migrations-path=priv/repo/migrations",
"test"
],
diff --git a/mix.lock b/mix.lock
index 76eb0d980..b106e9cd7 100644
--- a/mix.lock
+++ b/mix.lock
@@ -3,39 +3,39 @@
"benchee": {:hex, :benchee, "1.1.0", "f3a43817209a92a1fade36ef36b86e1052627fd8934a8b937ac9ab3a76c43062", [:mix], [{:deep_merge, "~> 1.0", [hex: :deep_merge, repo: "hexpm", optional: false]}, {:statistex, "~> 1.0", [hex: :statistex, repo: "hexpm", optional: false]}], "hexpm", "7da57d545003165a012b587077f6ba90b89210fd88074ce3c60ce239eb5e6d93"},
"bertex": {:hex, :bertex, "1.3.0", "0ad0df9159b5110d9d2b6654f72fbf42a54884ef43b6b651e6224c0af30ba3cb", [:mix], [], "hexpm", "0a5d5e478bb5764b7b7bae37cae1ca491200e58b089df121a2fe1c223d8ee57a"},
"bunt": {:hex, :bunt, "1.0.0", "081c2c665f086849e6d57900292b3a161727ab40431219529f13c4ddcf3e7a44", [:mix], [], "hexpm", "dc5f86aa08a5f6fa6b8096f0735c4e76d54ae5c9fa2c143e5a1fc7c1cd9bb6b5"},
- "cachex": {:hex, :cachex, "4.0.3", "95e88c3ef4d37990948eaecccefe40b4ce4a778e0d7ade29081e6b7a89309ee2", [:mix], [{:eternal, "~> 1.2", [hex: :eternal, repo: "hexpm", optional: false]}, {:ex_hash_ring, "~> 6.0", [hex: :ex_hash_ring, repo: "hexpm", optional: false]}, {:jumper, "~> 1.0", [hex: :jumper, repo: "hexpm", optional: false]}, {:sleeplocks, "~> 1.1", [hex: :sleeplocks, repo: "hexpm", optional: false]}, {:unsafe, "~> 1.0", [hex: :unsafe, repo: "hexpm", optional: false]}], "hexpm", "d5d632da7f162f8a190f1c39b712c0ebc9cf0007c4e2029d44eddc8041b52d55"},
- "castore": {:hex, :castore, "1.0.11", "4bbd584741601eb658007339ea730b082cc61f3554cf2e8f39bf693a11b49073", [:mix], [], "hexpm", "e03990b4db988df56262852f20de0f659871c35154691427a5047f4967a16a62"},
+ "cachex": {:hex, :cachex, "4.1.1", "574c5cd28473db313a0a76aac8c945fe44191659538ca6a1e8946ec300b1a19f", [:mix], [{:eternal, "~> 1.2", [hex: :eternal, repo: "hexpm", optional: false]}, {:ex_hash_ring, "~> 6.0", [hex: :ex_hash_ring, repo: "hexpm", optional: false]}, {:jumper, "~> 1.0", [hex: :jumper, repo: "hexpm", optional: false]}, {:sleeplocks, "~> 1.1", [hex: :sleeplocks, repo: "hexpm", optional: false]}, {:unsafe, "~> 1.0", [hex: :unsafe, repo: "hexpm", optional: false]}], "hexpm", "d6b7449ff98d6bb92dda58bd4fc3189cae9f99e7042054d669596f56dc503cd8"},
+ "castore": {:hex, :castore, "1.0.15", "8aa930c890fe18b6fe0a0cff27b27d0d4d231867897bd23ea772dee561f032a3", [:mix], [], "hexpm", "96ce4c69d7d5d7a0761420ef743e2f4096253931a3ba69e5ff8ef1844fe446d3"},
"chatterbox": {:hex, :ts_chatterbox, "0.15.1", "5cac4d15dd7ad61fc3c4415ce4826fc563d4643dee897a558ec4ea0b1c835c9c", [:rebar3], [{:hpack, "~> 0.3.0", [hex: :hpack_erl, repo: "hexpm", optional: false]}], "hexpm", "4f75b91451338bc0da5f52f3480fa6ef6e3a2aeecfc33686d6b3d0a0948f31aa"},
"corsica": {:hex, :corsica, "2.1.3", "dccd094ffce38178acead9ae743180cdaffa388f35f0461ba1e8151d32e190e6", [:mix], [{:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "616c08f61a345780c2cf662ff226816f04d8868e12054e68963e95285b5be8bc"},
- "cowboy": {:hex, :cowboy, "2.12.0", "f276d521a1ff88b2b9b4c54d0e753da6c66dd7be6c9fca3d9418b561828a3731", [:make, :rebar3], [{:cowlib, "2.13.0", [hex: :cowlib, repo: "hexpm", optional: false]}, {:ranch, "1.8.0", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "8a7abe6d183372ceb21caa2709bec928ab2b72e18a3911aa1771639bef82651e"},
+ "cowboy": {:hex, :cowboy, "2.14.2", "4008be1df6ade45e4f2a4e9e2d22b36d0b5aba4e20b0a0d7049e28d124e34847", [:make, :rebar3], [{:cowlib, ">= 2.16.0 and < 3.0.0", [hex: :cowlib, repo: "hexpm", optional: false]}, {:ranch, ">= 1.8.0 and < 3.0.0", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "569081da046e7b41b5df36aa359be71a0c8874e5b9cff6f747073fc57baf1ab9"},
"cowboy_telemetry": {:hex, :cowboy_telemetry, "0.4.0", "f239f68b588efa7707abce16a84d0d2acf3a0f50571f8bb7f56a15865aae820c", [:rebar3], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "7d98bac1ee4565d31b62d59f8823dfd8356a169e7fcbb83831b8a5397404c9de"},
- "cowlib": {:hex, :cowlib, "2.13.0", "db8f7505d8332d98ef50a3ef34b34c1afddec7506e4ee4dd4a3a266285d282ca", [:make, :rebar3], [], "hexpm", "e1e1284dc3fc030a64b1ad0d8382ae7e99da46c3246b815318a4b848873800a4"},
- "credo": {:hex, :credo, "1.7.11", "d3e805f7ddf6c9c854fd36f089649d7cf6ba74c42bc3795d587814e3c9847102", [:mix], [{:bunt, "~> 0.2.1 or ~> 1.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "56826b4306843253a66e47ae45e98e7d284ee1f95d53d1612bb483f88a8cf219"},
+ "cowlib": {:hex, :cowlib, "2.16.0", "54592074ebbbb92ee4746c8a8846e5605052f29309d3a873468d76cdf932076f", [:make, :rebar3], [], "hexpm", "7f478d80d66b747344f0ea7708c187645cfcc08b11aa424632f78e25bf05db51"},
+ "credo": {:hex, :credo, "1.7.13", "126a0697df6b7b71cd18c81bc92335297839a806b6f62b61d417500d1070ff4e", [:mix], [{:bunt, "~> 0.2.1 or ~> 1.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "47641e6d2bbff1e241e87695b29f617f1a8f912adea34296fb10ecc3d7e9e84f"},
"ctx": {:hex, :ctx, "0.6.0", "8ff88b70e6400c4df90142e7f130625b82086077a45364a78d208ed3ed53c7fe", [:rebar3], [], "hexpm", "a14ed2d1b67723dbebbe423b28d7615eb0bdcba6ff28f2d1f1b0a7e1d4aa5fc2"},
- "db_connection": {:hex, :db_connection, "2.8.0", "64fd82cfa6d8e25ec6660cea73e92a4cbc6a18b31343910427b702838c4b33b2", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "008399dae5eee1bf5caa6e86d204dcb44242c82b1ed5e22c881f2c34da201b15"},
+ "db_connection": {:hex, :db_connection, "2.8.1", "9abdc1e68c34c6163f6fb96a96532272d13ad7ca45262156ae8b7ec6d9dc4bec", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "a61a3d489b239d76f326e03b98794fb8e45168396c925ef25feb405ed09da8fd"},
"decimal": {:hex, :decimal, "2.3.0", "3ad6255aa77b4a3c4f818171b12d237500e63525c2fd056699967a3e7ea20f62", [:mix], [], "hexpm", "a4d66355cb29cb47c3cf30e71329e58361cfcb37c34235ef3bf1d7bf3773aeac"},
"deep_merge": {:hex, :deep_merge, "1.0.0", "b4aa1a0d1acac393bdf38b2291af38cb1d4a52806cf7a4906f718e1feb5ee961", [:mix], [], "hexpm", "ce708e5f094b9cd4e8f2be4f00d2f4250c4095be93f8cd6d018c753894885430"},
- "dialyxir": {:hex, :dialyxir, "1.4.5", "ca1571ac18e0f88d4ab245f0b60fa31ff1b12cbae2b11bd25d207f865e8ae78a", [:mix], [{:erlex, ">= 0.2.7", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "b0fb08bb8107c750db5c0b324fa2df5ceaa0f9307690ee3c1f6ba5b9eb5d35c3"},
- "ecto": {:hex, :ecto, "3.13.2", "7d0c0863f3fc8d71d17fc3ad3b9424beae13f02712ad84191a826c7169484f01", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "669d9291370513ff56e7b7e7081b7af3283d02e046cf3d403053c557894a0b3e"},
+ "dialyxir": {:hex, :dialyxir, "1.4.6", "7cca478334bf8307e968664343cbdb432ee95b4b68a9cba95bdabb0ad5bdfd9a", [:mix], [{:erlex, ">= 0.2.7", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "8cf5615c5cd4c2da6c501faae642839c8405b49f8aa057ad4ae401cb808ef64d"},
+ "ecto": {:hex, :ecto, "3.13.3", "6a983f0917f8bdc7a89e96f2bf013f220503a0da5d8623224ba987515b3f0d80", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "1927db768f53a88843ff25b6ba7946599a8ca8a055f69ad8058a1432a399af94"},
"ecto_psql_extras": {:hex, :ecto_psql_extras, "0.8.8", "aa02529c97f69aed5722899f5dc6360128735a92dd169f23c5d50b1f7fdede08", [:mix], [{:ecto_sql, "~> 3.7", [hex: :ecto_sql, repo: "hexpm", optional: false]}, {:postgrex, "> 0.16.0", [hex: :postgrex, repo: "hexpm", optional: false]}, {:table_rex, "~> 3.1.1 or ~> 4.0", [hex: :table_rex, repo: "hexpm", optional: false]}], "hexpm", "04c63d92b141723ad6fed2e60a4b461ca00b3594d16df47bbc48f1f4534f2c49"},
"ecto_sql": {:hex, :ecto_sql, "3.13.2", "a07d2461d84107b3d037097c822ffdd36ed69d1cf7c0f70e12a3d1decf04e2e1", [:mix], [{:db_connection, "~> 2.4.1 or ~> 2.5", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.13.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.7", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.19 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "539274ab0ecf1a0078a6a72ef3465629e4d6018a3028095dc90f60a19c371717"},
"erlex": {:hex, :erlex, "0.2.7", "810e8725f96ab74d17aac676e748627a07bc87eb950d2b83acd29dc047a30595", [:mix], [], "hexpm", "3ed95f79d1a844c3f6bf0cea61e0d5612a42ce56da9c03f01df538685365efb0"},
- "esbuild": {:hex, :esbuild, "0.8.2", "5f379dfa383ef482b738e7771daf238b2d1cfb0222bef9d3b20d4c8f06c7a7ac", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}, {:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "558a8a08ed78eb820efbfda1de196569d8bfa9b51e8371a1934fbb31345feda7"},
+ "esbuild": {:hex, :esbuild, "0.10.0", "b0aa3388a1c23e727c5a3e7427c932d89ee791746b0081bbe56103e9ef3d291f", [:mix], [{:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "468489cda427b974a7cc9f03ace55368a83e1a7be12fba7e30969af78e5f8c70"},
"eternal": {:hex, :eternal, "1.2.2", "d1641c86368de99375b98d183042dd6c2b234262b8d08dfd72b9eeaafc2a1abd", [:mix], [], "hexpm", "2c9fe32b9c3726703ba5e1d43a1d255a4f3f2d8f8f9bc19f094c7cb1a7a9e782"},
"ex_hash_ring": {:hex, :ex_hash_ring, "6.0.4", "bef9d2d796afbbe25ab5b5a7ed746e06b99c76604f558113c273466d52fa6d6b", [:mix], [], "hexpm", "89adabf31f7d3dfaa36802ce598ce918e9b5b33bae8909ac1a4d052e1e567d18"},
- "ex_json_schema": {:hex, :ex_json_schema, "0.10.2", "7c4b8c1481fdeb1741e2ce66223976edfb9bccebc8014f6aec35d4efe964fb71", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}], "hexpm", "37f43be60f8407659d4d0155a7e45e7f406dab1f827051d3d35858a709baf6a6"},
- "excoveralls": {:hex, :excoveralls, "0.18.3", "bca47a24d69a3179951f51f1db6d3ed63bca9017f476fe520eb78602d45f7756", [:mix], [{:castore, "~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "746f404fcd09d5029f1b211739afb8fb8575d775b21f6a3908e7ce3e640724c6"},
+ "ex_json_schema": {:hex, :ex_json_schema, "0.11.1", "b593f92937a095f66054bb318681397dfe7304e7d2b6b1a7534ea3aa40024f8c", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}], "hexpm", "32d651a575a6ce2fd613f140b0fef8dd0acc7cf8e8bcd29a3a1be5c945700dd5"},
+ "excoveralls": {:hex, :excoveralls, "0.18.5", "e229d0a65982613332ec30f07940038fe451a2e5b29bce2a5022165f0c9b157e", [:mix], [{:castore, "~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "523fe8a15603f86d64852aab2abe8ddbd78e68579c8525ae765facc5eae01562"},
"expo": {:hex, :expo, "1.1.0", "f7b9ed7fb5745ebe1eeedf3d6f29226c5dd52897ac67c0f8af62a07e661e5c75", [:mix], [], "hexpm", "fbadf93f4700fb44c331362177bdca9eeb8097e8b0ef525c9cc501cb9917c960"},
- "file_system": {:hex, :file_system, "1.1.0", "08d232062284546c6c34426997dd7ef6ec9f8bbd090eb91780283c9016840e8f", [:mix], [], "hexpm", "bfcf81244f416871f2a2e15c1b515287faa5db9c6bcf290222206d120b3d43f6"},
- "finch": {:hex, :finch, "0.19.0", "c644641491ea854fc5c1bbaef36bfc764e3f08e7185e1f084e35e0672241b76d", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.6.2 or ~> 1.7", [hex: :mint, repo: "hexpm", optional: false]}, {:nimble_options, "~> 0.4 or ~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:nimble_pool, "~> 1.1", [hex: :nimble_pool, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "fc5324ce209125d1e2fa0fcd2634601c52a787aff1cd33ee833664a5af4ea2b6"},
- "floki": {:hex, :floki, "0.37.0", "b83e0280bbc6372f2a403b2848013650b16640cd2470aea6701f0632223d719e", [:mix], [], "hexpm", "516a0c15a69f78c47dc8e0b9b3724b29608aa6619379f91b1ffa47109b5d0dd3"},
- "gen_rpc": {:git, "https://github.com/supabase/gen_rpc.git", "d161cf263c661a534eaabf80aac7a34484dac772", [ref: "d161cf263c661a534eaabf80aac7a34484dac772"]},
+ "file_system": {:hex, :file_system, "1.1.1", "31864f4685b0148f25bd3fbef2b1228457c0c89024ad67f7a81a3ffbc0bbad3a", [:mix], [], "hexpm", "7a15ff97dfe526aeefb090a7a9d3d03aa907e100e262a0f8f7746b78f8f87a5d"},
+ "finch": {:hex, :finch, "0.20.0", "5330aefb6b010f424dcbbc4615d914e9e3deae40095e73ab0c1bb0968933cadf", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.6.2 or ~> 1.7", [hex: :mint, repo: "hexpm", optional: false]}, {:nimble_options, "~> 0.4 or ~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:nimble_pool, "~> 1.1", [hex: :nimble_pool, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "2658131a74d051aabfcba936093c903b8e89da9a1b63e430bee62045fa9b2ee2"},
+ "floki": {:hex, :floki, "0.38.0", "62b642386fa3f2f90713f6e231da0fa3256e41ef1089f83b6ceac7a3fd3abf33", [:mix], [], "hexpm", "a5943ee91e93fb2d635b612caf5508e36d37548e84928463ef9dd986f0d1abd9"},
+ "gen_rpc": {:git, "https://github.com/supabase/gen_rpc.git", "5382a0f2689a4cb8838873a2173928281dbe5002", [ref: "5382a0f2689a4cb8838873a2173928281dbe5002"]},
"gettext": {:hex, :gettext, "0.26.2", "5978aa7b21fada6deabf1f6341ddba50bc69c999e812211903b169799208f2a8", [:mix], [{:expo, "~> 0.5.1 or ~> 1.0", [hex: :expo, repo: "hexpm", optional: false]}], "hexpm", "aa978504bcf76511efdc22d580ba08e2279caab1066b76bb9aa81c4a1e0a32a5"},
"gproc": {:hex, :gproc, "0.9.1", "f1df0364423539cf0b80e8201c8b1839e229e5f9b3ccb944c5834626998f5b8c", [:rebar3], [], "hexpm", "905088e32e72127ed9466f0bac0d8e65704ca5e73ee5a62cb073c3117916d507"},
"grpcbox": {:hex, :grpcbox, "0.17.1", "6e040ab3ef16fe699ffb513b0ef8e2e896da7b18931a1ef817143037c454bcce", [:rebar3], [{:acceptor_pool, "~> 1.0.0", [hex: :acceptor_pool, repo: "hexpm", optional: false]}, {:chatterbox, "~> 0.15.1", [hex: :ts_chatterbox, repo: "hexpm", optional: false]}, {:ctx, "~> 0.6.0", [hex: :ctx, repo: "hexpm", optional: false]}, {:gproc, "~> 0.9.1", [hex: :gproc, repo: "hexpm", optional: false]}], "hexpm", "4a3b5d7111daabc569dc9cbd9b202a3237d81c80bf97212fbc676832cb0ceb17"},
- "ham": {:hex, :ham, "0.3.0", "7cd031b4a55fba219c11553e7b13ba73bd86eab4034518445eff1e038cb9a44d", [:mix], [], "hexpm", "7d6c6b73d7a6a83233876cc1b06a4d9b5de05562b228effda4532f9a49852bf6"},
+ "ham": {:hex, :ham, "0.3.2", "02ae195f49970ef667faf9d01bc454fb80909a83d6c775bcac724ca567aeb7b3", [:mix], [], "hexpm", "b71cc684c0e5a3d32b5f94b186770551509e93a9ae44ca1c1a313700f2f6a69a"},
"hpack": {:hex, :hpack_erl, "0.3.0", "2461899cc4ab6a0ef8e970c1661c5fc6a52d3c25580bc6dd204f84ce94669926", [:rebar3], [], "hexpm", "d6137d7079169d8c485c6962dfe261af5b9ef60fbc557344511c1e65e3d95fb0"},
- "hpax": {:hex, :hpax, "1.0.2", "762df951b0c399ff67cc57c3995ec3cf46d696e41f0bba17da0518d94acd4aac", [:mix], [], "hexpm", "2f09b4c1074e0abd846747329eaa26d535be0eb3d189fa69d812bfb8bfefd32f"},
+ "hpax": {:hex, :hpax, "1.0.3", "ed67ef51ad4df91e75cc6a1494f851850c0bd98ebc0be6e81b026e765ee535aa", [:mix], [], "hexpm", "8eab6e1cfa8d5918c2ce4ba43588e894af35dbd8e91e6e55c817bca5847df34a"},
"jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"},
"joken": {:hex, :joken, "2.5.0", "09be497d804b8115eb6f07615cef2e60c2a1008fb89dc0aef0d4c4b4609b99aa", [:mix], [{:jose, "~> 1.11.2", [hex: :jose, repo: "hexpm", optional: false]}], "hexpm", "22b25c89617c5ed8ca7b31026340a25ea0f9ca7160f9706b79be9ed81fdf74e7"},
"jose": {:hex, :jose, "1.11.10", "a903f5227417bd2a08c8a00a0cbcc458118be84480955e8d251297a425723f83", [:mix, :rebar3], [], "hexpm", "0d6cd36ff8ba174db29148fc112b5842186b68a90ce9fc2b3ec3afe76593e614"},
@@ -45,65 +45,66 @@
"logflare_api_client": {:hex, :logflare_api_client, "0.3.5", "c427ebf65a8402d68b056d4a5ef3e1eb3b90c0ad1d0de97d1fe23807e0c1b113", [:mix], [{:bertex, "~> 1.3", [hex: :bertex, repo: "hexpm", optional: false]}, {:finch, "~> 0.10", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, ">= 1.0.0", [hex: :jason, repo: "hexpm", optional: false]}, {:tesla, "~> 1.0", [hex: :tesla, repo: "hexpm", optional: false]}], "hexpm", "16d29abcb80c4f72745cdf943379da02a201504813c3aa12b4d4acb0302b7723"},
"logflare_etso": {:hex, :logflare_etso, "1.1.2", "040bd3e482aaf0ed20080743b7562242ec5079fd88a6f9c8ce5d8298818292e9", [:mix], [{:ecto, "~> 3.8", [hex: :ecto, repo: "hexpm", optional: false]}], "hexpm", "ab96be42900730a49b132891f43a9be1d52e4ad3ee9ed9cb92565c5f87345117"},
"logflare_logger_backend": {:hex, :logflare_logger_backend, "0.11.4", "3a5df94e764b7c8ee4bd7b875a480a34a27807128d8459aa59ea63b2b38bddc7", [:mix], [{:bertex, "~> 1.3", [hex: :bertex, repo: "hexpm", optional: false]}, {:logflare_api_client, "~> 0.3.5", [hex: :logflare_api_client, repo: "hexpm", optional: false]}, {:logflare_etso, "~> 1.1.2", [hex: :logflare_etso, repo: "hexpm", optional: false]}, {:typed_struct, "~> 0.3.0", [hex: :typed_struct, repo: "hexpm", optional: false]}], "hexpm", "00998d81b3c481ad93d2bf25e66d1ddb1a01ad77d994e2c1a7638c6da94755c5"},
- "mime": {:hex, :mime, "2.0.6", "8f18486773d9b15f95f4f4f1e39b710045fa1de891fada4516559967276e4dc2", [:mix], [], "hexpm", "c9945363a6b26d747389aac3643f8e0e09d30499a138ad64fe8fd1d13d9b153e"},
+ "mime": {:hex, :mime, "2.0.7", "b8d739037be7cd402aee1ba0306edfdef982687ee7e9859bee6198c1e7e2f128", [:mix], [], "hexpm", "6171188e399ee16023ffc5b76ce445eb6d9672e2e241d2df6050f3c771e80ccd"},
"mimic": {:hex, :mimic, "1.12.0", "34c9d1fb8e756df09ca5f96861d273f2bb01063df1a6a51a4c101f9ad7f07a9c", [:mix], [{:ham, "~> 0.2", [hex: :ham, repo: "hexpm", optional: false]}], "hexpm", "eaa43d495d6f3bc8099b28886e05a1b09a2a6be083f6385c3abc17599e5e2c43"},
- "mint": {:hex, :mint, "1.6.2", "af6d97a4051eee4f05b5500671d47c3a67dac7386045d87a904126fd4bbcea2e", [:mix], [{:castore, "~> 0.1.0 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:hpax, "~> 0.1.1 or ~> 0.2.0 or ~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}], "hexpm", "5ee441dffc1892f1ae59127f74afe8fd82fda6587794278d924e4d90ea3d63f9"},
+ "mint": {:hex, :mint, "1.7.1", "113fdb2b2f3b59e47c7955971854641c61f378549d73e829e1768de90fc1abf1", [:mix], [{:castore, "~> 0.1.0 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:hpax, "~> 0.1.1 or ~> 0.2.0 or ~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}], "hexpm", "fceba0a4d0f24301ddee3024ae116df1c3f4bb7a563a731f45fdfeb9d39a231b"},
"mint_web_socket": {:hex, :mint_web_socket, "1.0.4", "0b539116dbb3d3f861cdf5e15e269a933cb501c113a14db7001a3157d96ffafd", [:mix], [{:mint, ">= 1.4.1 and < 2.0.0-0", [hex: :mint, repo: "hexpm", optional: false]}], "hexpm", "027d4c5529c45a4ba0ce27a01c0f35f284a5468519c045ca15f43decb360a991"},
- "mix_audit": {:hex, :mix_audit, "2.1.4", "0a23d5b07350cdd69001c13882a4f5fb9f90fbd4cbf2ebc190a2ee0d187ea3e9", [:make, :mix], [{:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}, {:yaml_elixir, "~> 2.11", [hex: :yaml_elixir, repo: "hexpm", optional: false]}], "hexpm", "fd807653cc8c1cada2911129c7eb9e985e3cc76ebf26f4dd628bb25bbcaa7099"},
+ "mix_audit": {:hex, :mix_audit, "2.1.5", "c0f77cee6b4ef9d97e37772359a187a166c7a1e0e08b50edf5bf6959dfe5a016", [:make, :mix], [{:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}, {:yaml_elixir, "~> 2.11", [hex: :yaml_elixir, repo: "hexpm", optional: false]}], "hexpm", "87f9298e21da32f697af535475860dc1d3617a010e0b418d2ec6142bc8b42d69"},
"mix_test_watch": {:hex, :mix_test_watch, "1.3.0", "2ffc9f72b0d1f4ecf0ce97b044e0e3c607c3b4dc21d6228365e8bc7c2856dc77", [:mix], [{:file_system, "~> 0.2 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}], "hexpm", "f9e5edca976857ffac78632e635750d158df14ee2d6185a15013844af7570ffe"},
"nimble_options": {:hex, :nimble_options, "1.1.1", "e3a492d54d85fc3fd7c5baf411d9d2852922f66e69476317787a7b2bb000a61b", [:mix], [], "hexpm", "821b2470ca9442c4b6984882fe9bb0389371b8ddec4d45a9504f00a66f650b44"},
"nimble_pool": {:hex, :nimble_pool, "1.1.0", "bf9c29fbdcba3564a8b800d1eeb5a3c58f36e1e11d7b7fb2e084a643f645f06b", [:mix], [], "hexpm", "af2e4e6b34197db81f7aad230c1118eac993acc0dae6bc83bac0126d4ae0813a"},
- "observer_cli": {:hex, :observer_cli, "1.8.1", "edfe0c0f983631961599326f239f6e99750aba7387515002b1284dcfe7fcd6d2", [:mix, :rebar3], [{:recon, "~> 2.5.6", [hex: :recon, repo: "hexpm", optional: false]}], "hexpm", "a3cd6300dd8290ade93d688fbd79c872e393b01256309dd7a653feb13c434fb4"},
+ "observer_cli": {:hex, :observer_cli, "1.8.4", "09030c04d2480499037ba33d801c6e02adba4e7244a05e05b984b5a82843be71", [:mix, :rebar3], [{:recon, "~> 2.5.6", [hex: :recon, repo: "hexpm", optional: false]}], "hexpm", "0fcd71ac723bcd2d91266d99b3c3ccd9465c71c9f392d900cea8effdc1a1485c"},
"octo_fetch": {:hex, :octo_fetch, "0.4.0", "074b5ecbc08be10b05b27e9db08bc20a3060142769436242702931c418695b19", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "~> 1.1", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}], "hexpm", "cf8be6f40cd519d7000bb4e84adcf661c32e59369ca2827c4e20042eda7a7fc6"},
- "open_api_spex": {:hex, :open_api_spex, "3.21.2", "6a704f3777761feeb5657340250d6d7332c545755116ca98f33d4b875777e1e5", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:plug, "~> 1.7", [hex: :plug, repo: "hexpm", optional: false]}, {:poison, "~> 3.0 or ~> 4.0 or ~> 5.0 or ~> 6.0", [hex: :poison, repo: "hexpm", optional: true]}, {:ymlr, "~> 2.0 or ~> 3.0 or ~> 4.0 or ~> 5.0", [hex: :ymlr, repo: "hexpm", optional: true]}], "hexpm", "f42ae6ed668b895ebba3e02773cfb4b41050df26f803f2ef634c72a7687dc387"},
- "opentelemetry": {:hex, :opentelemetry, "1.5.0", "7dda6551edfc3050ea4b0b40c0d2570423d6372b97e9c60793263ef62c53c3c2", [:rebar3], [{:opentelemetry_api, "~> 1.4", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}], "hexpm", "cdf4f51d17b592fc592b9a75f86a6f808c23044ba7cf7b9534debbcc5c23b0ee"},
- "opentelemetry_api": {:hex, :opentelemetry_api, "1.4.0", "63ca1742f92f00059298f478048dfb826f4b20d49534493d6919a0db39b6db04", [:mix, :rebar3], [], "hexpm", "3dfbbfaa2c2ed3121c5c483162836c4f9027def469c41578af5ef32589fcfc58"},
+ "open_api_spex": {:hex, :open_api_spex, "3.22.0", "fbf90dc82681dc042a4ee79853c8e989efbba73d9e87439085daf849bbf8bc20", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:plug, "~> 1.7", [hex: :plug, repo: "hexpm", optional: false]}, {:poison, "~> 3.0 or ~> 4.0 or ~> 5.0 or ~> 6.0", [hex: :poison, repo: "hexpm", optional: true]}, {:ymlr, "~> 2.0 or ~> 3.0 or ~> 4.0 or ~> 5.0", [hex: :ymlr, repo: "hexpm", optional: true]}], "hexpm", "dd751ddbdd709bb4a5313e9a24530da6e66594773c7242a0c2592cbd9f589063"},
+ "opentelemetry": {:hex, :opentelemetry, "1.6.0", "0954dbe12f490ee7b126c9e924cf60141b1238a02dfc700907eadde4dcc20460", [:rebar3], [{:opentelemetry_api, "~> 1.4.0", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}], "hexpm", "5fd0123d65d2649f10e478e7444927cd9fbdffcaeb8c1c2fcae3d486d18c5e62"},
+ "opentelemetry_api": {:hex, :opentelemetry_api, "1.4.1", "e071429a37441a0fe9097eeea0ff921ebadce8eba8e1ce297b05a43c7a0d121f", [:mix, :rebar3], [], "hexpm", "39bdb6ad740bc13b16215cb9f233d66796bbae897f3bf6eb77abb712e87c3c26"},
"opentelemetry_cowboy": {:hex, :opentelemetry_cowboy, "1.0.0", "786c7cde66a2493323c79d2c94e679ff501d459a9b403d8b60b9bef116333117", [:rebar3], [{:cowboy_telemetry, "~> 0.4", [hex: :cowboy_telemetry, repo: "hexpm", optional: false]}, {:opentelemetry_api, "~> 1.4", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:opentelemetry_semantic_conventions, "~> 1.27", [hex: :opentelemetry_semantic_conventions, repo: "hexpm", optional: false]}, {:opentelemetry_telemetry, "~> 1.1", [hex: :opentelemetry_telemetry, repo: "hexpm", optional: false]}, {:otel_http, "~> 0.2", [hex: :otel_http, repo: "hexpm", optional: false]}, {:telemetry, "~> 1.1", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "7575716eaccacd0eddc3e7e61403aecb5d0a6397183987d6049094aeb0b87a7c"},
"opentelemetry_ecto": {:hex, :opentelemetry_ecto, "1.2.0", "2382cb47ddc231f953d3b8263ed029d87fbf217915a1da82f49159d122b64865", [:mix], [{:opentelemetry_api, "~> 1.0", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:opentelemetry_process_propagator, "~> 0.2", [hex: :opentelemetry_process_propagator, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "70dfa2e79932e86f209df00e36c980b17a32f82d175f0068bf7ef9a96cf080cf"},
- "opentelemetry_exporter": {:hex, :opentelemetry_exporter, "1.8.0", "5d546123230771ef4174e37bedfd77e3374913304cd6ea3ca82a2add49cd5d56", [:rebar3], [{:grpcbox, ">= 0.0.0", [hex: :grpcbox, repo: "hexpm", optional: false]}, {:opentelemetry, "~> 1.5.0", [hex: :opentelemetry, repo: "hexpm", optional: false]}, {:opentelemetry_api, "~> 1.4.0", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:tls_certificate_check, "~> 1.18", [hex: :tls_certificate_check, repo: "hexpm", optional: false]}], "hexpm", "a1f9f271f8d3b02b81462a6bfef7075fd8457fdb06adff5d2537df5e2264d9af"},
+ "opentelemetry_exporter": {:hex, :opentelemetry_exporter, "1.9.0", "e344bf5e3dab2815fe381b0cac172c06cfc29ecf792c5d74cbbd2b3184af359c", [:rebar3], [{:grpcbox, ">= 0.0.0", [hex: :grpcbox, repo: "hexpm", optional: false]}, {:opentelemetry, "~> 1.6.0", [hex: :opentelemetry, repo: "hexpm", optional: false]}, {:opentelemetry_api, "~> 1.4.0", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:tls_certificate_check, "~> 1.18", [hex: :tls_certificate_check, repo: "hexpm", optional: false]}], "hexpm", "2030a59e33afff6aaeba847d865c8db5dc3873db87a9257df2ca03cafd9e0478"},
"opentelemetry_phoenix": {:hex, :opentelemetry_phoenix, "2.0.1", "c664cdef205738cffcd409b33599439a4ffb2035ef6e21a77927ac1da90463cb", [:mix], [{:nimble_options, "~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:opentelemetry_api, "~> 1.4", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:opentelemetry_process_propagator, "~> 0.3", [hex: :opentelemetry_process_propagator, repo: "hexpm", optional: false]}, {:opentelemetry_semantic_conventions, "~> 1.27", [hex: :opentelemetry_semantic_conventions, repo: "hexpm", optional: false]}, {:opentelemetry_telemetry, "~> 1.1", [hex: :opentelemetry_telemetry, repo: "hexpm", optional: false]}, {:otel_http, "~> 0.2", [hex: :otel_http, repo: "hexpm", optional: false]}, {:plug, ">= 1.11.0", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "a24fdccdfa6b890c8892c6366beab4a15a27ec0c692b0f77ec2a862e7b235f6e"},
"opentelemetry_process_propagator": {:hex, :opentelemetry_process_propagator, "0.3.0", "ef5b2059403a1e2b2d2c65914e6962e56371570b8c3ab5323d7a8d3444fb7f84", [:mix, :rebar3], [{:opentelemetry_api, "~> 1.0", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}], "hexpm", "7243cb6de1523c473cba5b1aefa3f85e1ff8cc75d08f367104c1e11919c8c029"},
"opentelemetry_semantic_conventions": {:hex, :opentelemetry_semantic_conventions, "1.27.0", "acd0194a94a1e57d63da982ee9f4a9f88834ae0b31b0bd850815fe9be4bbb45f", [:mix, :rebar3], [], "hexpm", "9681ccaa24fd3d810b4461581717661fd85ff7019b082c2dff89c7d5b1fc2864"},
"opentelemetry_telemetry": {:hex, :opentelemetry_telemetry, "1.1.2", "410ab4d76b0921f42dbccbe5a7c831b8125282850be649ee1f70050d3961118a", [:mix, :rebar3], [{:opentelemetry_api, "~> 1.3", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:telemetry, "~> 1.1", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "641ab469deb181957ac6d59bce6e1321d5fe2a56df444fc9c19afcad623ab253"},
"otel_http": {:hex, :otel_http, "0.2.0", "b17385986c7f1b862f5d577f72614ecaa29de40392b7618869999326b9a61d8a", [:rebar3], [], "hexpm", "f2beadf922c8cfeb0965488dd736c95cc6ea8b9efce89466b3904d317d7cc717"},
- "phoenix": {:hex, :phoenix, "1.7.19", "36617efe5afbd821099a8b994ff4618a340a5bfb25531a1802c4d4c634017a57", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix_pubsub, "~> 2.1", [hex: :phoenix_pubsub, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.7", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:plug_crypto, "~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:websock_adapter, "~> 0.5.3", [hex: :websock_adapter, repo: "hexpm", optional: false]}], "hexpm", "ba4dc14458278773f905f8ae6c2ec743d52c3a35b6b353733f64f02dfe096cd6"},
+ "peep": {:git, "https://github.com/supabase/peep.git", "3ba8f8f77f4c8dae734f9d8f603c24c1046502da", [branch: "feat/partitions-ets"]},
+ "phoenix": {:git, "https://github.com/supabase/phoenix.git", "7b884cc0cc1a49ad2bc272acda2e622b3e11c139", [branch: "feat/presence-custom-dispatcher-1.7.19"]},
"phoenix_ecto": {:hex, :phoenix_ecto, "4.4.3", "86e9878f833829c3f66da03d75254c155d91d72a201eb56ae83482328dc7ca93", [:mix], [{:ecto, "~> 3.5", [hex: :ecto, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}, {:plug, "~> 1.9", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "d36c401206f3011fefd63d04e8ef626ec8791975d9d107f9a0817d426f61ac07"},
"phoenix_html": {:hex, :phoenix_html, "3.3.4", "42a09fc443bbc1da37e372a5c8e6755d046f22b9b11343bf885067357da21cb3", [:mix], [{:plug, "~> 1.5", [hex: :plug, repo: "hexpm", optional: true]}], "hexpm", "0249d3abec3714aff3415e7ee3d9786cb325be3151e6c4b3021502c585bf53fb"},
- "phoenix_live_dashboard": {:hex, :phoenix_live_dashboard, "0.8.6", "7b1f0327f54c9eb69845fd09a77accf922f488c549a7e7b8618775eb603a62c7", [:mix], [{:ecto, "~> 3.6.2 or ~> 3.7", [hex: :ecto, repo: "hexpm", optional: true]}, {:ecto_mysql_extras, "~> 0.5", [hex: :ecto_mysql_extras, repo: "hexpm", optional: true]}, {:ecto_psql_extras, "~> 0.7", [hex: :ecto_psql_extras, repo: "hexpm", optional: true]}, {:ecto_sqlite3_extras, "~> 1.1.7 or ~> 1.2.0", [hex: :ecto_sqlite3_extras, repo: "hexpm", optional: true]}, {:mime, "~> 1.6 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:phoenix_live_view, "~> 0.19 or ~> 1.0", [hex: :phoenix_live_view, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6 or ~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "1681ab813ec26ca6915beb3414aa138f298e17721dc6a2bde9e6eb8a62360ff6"},
- "phoenix_live_reload": {:hex, :phoenix_live_reload, "1.5.3", "f2161c207fda0e4fb55165f650f7f8db23f02b29e3bff00ff7ef161d6ac1f09d", [:mix], [{:file_system, "~> 0.3 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:phoenix, "~> 1.4", [hex: :phoenix, repo: "hexpm", optional: false]}], "hexpm", "b4ec9cd73cb01ff1bd1cac92e045d13e7030330b74164297d1aee3907b54803c"},
+ "phoenix_live_dashboard": {:hex, :phoenix_live_dashboard, "0.8.7", "405880012cb4b706f26dd1c6349125bfc903fb9e44d1ea668adaf4e04d4884b7", [:mix], [{:ecto, "~> 3.6.2 or ~> 3.7", [hex: :ecto, repo: "hexpm", optional: true]}, {:ecto_mysql_extras, "~> 0.5", [hex: :ecto_mysql_extras, repo: "hexpm", optional: true]}, {:ecto_psql_extras, "~> 0.7", [hex: :ecto_psql_extras, repo: "hexpm", optional: true]}, {:ecto_sqlite3_extras, "~> 1.1.7 or ~> 1.2.0", [hex: :ecto_sqlite3_extras, repo: "hexpm", optional: true]}, {:mime, "~> 1.6 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:phoenix_live_view, "~> 0.19 or ~> 1.0", [hex: :phoenix_live_view, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6 or ~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "3a8625cab39ec261d48a13b7468dc619c0ede099601b084e343968309bd4d7d7"},
+ "phoenix_live_reload": {:hex, :phoenix_live_reload, "1.6.1", "05df733a09887a005ed0d69a7fc619d376aea2730bf64ce52ac51ce716cc1ef0", [:mix], [{:file_system, "~> 0.2.10 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:phoenix, "~> 1.4", [hex: :phoenix, repo: "hexpm", optional: false]}], "hexpm", "74273843d5a6e4fef0bbc17599f33e3ec63f08e69215623a0cd91eea4288e5a0"},
"phoenix_live_view": {:hex, :phoenix_live_view, "0.20.17", "f396bbdaf4ba227b82251eb75ac0afa6b3da5e509bc0d030206374237dfc9450", [:mix], [{:floki, "~> 0.36", [hex: :floki, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix, "~> 1.6.15 or ~> 1.7.0", [hex: :phoenix, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 3.3 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.15", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.2 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "a61d741ffb78c85fdbca0de084da6a48f8ceb5261a79165b5a0b59e5f65ce98b"},
"phoenix_pubsub": {:hex, :phoenix_pubsub, "2.1.3", "3168d78ba41835aecad272d5e8cd51aa87a7ac9eb836eabc42f6e57538e3731d", [:mix], [], "hexpm", "bba06bc1dcfd8cb086759f0edc94a8ba2bc8896d5331a1e2c2902bf8e36ee502"},
"phoenix_template": {:hex, :phoenix_template, "1.0.4", "e2092c132f3b5e5b2d49c96695342eb36d0ed514c5b252a77048d5969330d639", [:mix], [{:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}], "hexpm", "2c0c81f0e5c6753faf5cca2f229c9709919aba34fab866d3bc05060c9c444206"},
"phoenix_view": {:hex, :phoenix_view, "2.0.4", "b45c9d9cf15b3a1af5fb555c674b525391b6a1fe975f040fb4d913397b31abf4", [:mix], [{:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}], "hexpm", "4e992022ce14f31fe57335db27a28154afcc94e9983266835bb3040243eb620b"},
- "plug": {:hex, :plug, "1.16.1", "40c74619c12f82736d2214557dedec2e9762029b2438d6d175c5074c933edc9d", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "a13ff6b9006b03d7e33874945b2755253841b238c34071ed85b0e86057f8cddc"},
- "plug_cowboy": {:hex, :plug_cowboy, "2.7.2", "fdadb973799ae691bf9ecad99125b16625b1c6039999da5fe544d99218e662e4", [:mix], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:cowboy_telemetry, "~> 0.3", [hex: :cowboy_telemetry, repo: "hexpm", optional: false]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "245d8a11ee2306094840c000e8816f0cbed69a23fc0ac2bcf8d7835ae019bb2f"},
- "plug_crypto": {:hex, :plug_crypto, "2.1.0", "f44309c2b06d249c27c8d3f65cfe08158ade08418cf540fd4f72d4d6863abb7b", [:mix], [], "hexpm", "131216a4b030b8f8ce0f26038bc4421ae60e4bb95c5cf5395e1421437824c4fa"},
+ "plug": {:hex, :plug, "1.18.1", "5067f26f7745b7e31bc3368bc1a2b818b9779faa959b49c934c17730efc911cf", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "57a57db70df2b422b564437d2d33cf8d33cd16339c1edb190cd11b1a3a546cc2"},
+ "plug_cowboy": {:hex, :plug_cowboy, "2.7.4", "729c752d17cf364e2b8da5bdb34fb5804f56251e88bb602aff48ae0bd8673d11", [:mix], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:cowboy_telemetry, "~> 0.3", [hex: :cowboy_telemetry, repo: "hexpm", optional: false]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "9b85632bd7012615bae0a5d70084deb1b25d2bcbb32cab82d1e9a1e023168aa3"},
+ "plug_crypto": {:hex, :plug_crypto, "2.1.1", "19bda8184399cb24afa10be734f84a16ea0a2bc65054e23a62bb10f06bc89491", [:mix], [], "hexpm", "6470bce6ffe41c8bd497612ffde1a7e4af67f36a15eea5f921af71cf3e11247c"},
"poolboy": {:hex, :poolboy, "1.5.2", "392b007a1693a64540cead79830443abf5762f5d30cf50bc95cb2c1aaafa006b", [:rebar3], [], "hexpm", "dad79704ce5440f3d5a3681c8590b9dc25d1a561e8f5a9c995281012860901e3"},
- "postgres_replication": {:git, "https://github.com/filipecabaco/postgres_replication.git", "69129221f0263aa13faa5fbb8af97c28aeb4f71c", []},
+ "postgres_replication": {:git, "https://github.com/filipecabaco/postgres_replication.git", "3b0700ee38a1dddaf7936c5793d6f35431fee2cd", []},
"postgrex": {:hex, :postgrex, "0.20.0", "363ed03ab4757f6bc47942eff7720640795eb557e1935951c1626f0d303a3aed", [:mix], [{:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.5 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "d36ef8b36f323d29505314f704e21a1a038e2dc387c6409ee0cd24144e187c0f"},
- "prom_ex": {:hex, :prom_ex, "1.9.0", "63e6dda6c05cdeec1f26c48443dcc38ffd2118b3665ae8d2bd0e5b79f2aea03e", [:mix], [{:absinthe, ">= 1.6.0", [hex: :absinthe, repo: "hexpm", optional: true]}, {:broadway, ">= 1.0.2", [hex: :broadway, repo: "hexpm", optional: true]}, {:ecto, ">= 3.5.0", [hex: :ecto, repo: "hexpm", optional: true]}, {:finch, "~> 0.15", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, "~> 1.2", [hex: :jason, repo: "hexpm", optional: false]}, {:oban, ">= 2.4.0", [hex: :oban, repo: "hexpm", optional: true]}, {:octo_fetch, "~> 0.3", [hex: :octo_fetch, repo: "hexpm", optional: false]}, {:phoenix, ">= 1.5.0", [hex: :phoenix, repo: "hexpm", optional: true]}, {:phoenix_live_view, ">= 0.14.0", [hex: :phoenix_live_view, repo: "hexpm", optional: true]}, {:plug, ">= 1.12.1", [hex: :plug, repo: "hexpm", optional: true]}, {:plug_cowboy, "~> 2.5", [hex: :plug_cowboy, repo: "hexpm", optional: false]}, {:telemetry, ">= 1.0.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}, {:telemetry_metrics_prometheus_core, "~> 1.0", [hex: :telemetry_metrics_prometheus_core, repo: "hexpm", optional: false]}, {:telemetry_poller, "~> 1.0", [hex: :telemetry_poller, repo: "hexpm", optional: false]}], "hexpm", "01f3d4f69ec93068219e686cc65e58a29c42bea5429a8ff4e2121f19db178ee6"},
- "ranch": {:hex, :ranch, "1.8.0", "8c7a100a139fd57f17327b6413e4167ac559fbc04ca7448e9be9057311597a1d", [:make, :rebar3], [], "hexpm", "49fbcfd3682fab1f5d109351b61257676da1a2fdbe295904176d5e521a2ddfe5"},
+ "prom_ex": {:hex, :prom_ex, "1.11.0", "1f6d67f2dead92224cb4f59beb3e4d319257c5728d9638b4a5e8ceb51a4f9c7e", [:mix], [{:absinthe, ">= 1.7.0", [hex: :absinthe, repo: "hexpm", optional: true]}, {:broadway, ">= 1.1.0", [hex: :broadway, repo: "hexpm", optional: true]}, {:ecto, ">= 3.11.0", [hex: :ecto, repo: "hexpm", optional: true]}, {:finch, "~> 0.18", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}, {:oban, ">= 2.10.0", [hex: :oban, repo: "hexpm", optional: true]}, {:octo_fetch, "~> 0.4", [hex: :octo_fetch, repo: "hexpm", optional: false]}, {:peep, "~> 3.0", [hex: :peep, repo: "hexpm", optional: false]}, {:phoenix, ">= 1.7.0", [hex: :phoenix, repo: "hexpm", optional: true]}, {:phoenix_live_view, ">= 0.20.0", [hex: :phoenix_live_view, repo: "hexpm", optional: true]}, {:plug, ">= 1.16.0", [hex: :plug, repo: "hexpm", optional: true]}, {:plug_cowboy, ">= 2.6.0", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:telemetry, ">= 1.0.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}, {:telemetry_metrics_prometheus_core, "~> 1.2", [hex: :telemetry_metrics_prometheus_core, repo: "hexpm", optional: false]}, {:telemetry_poller, "~> 1.1", [hex: :telemetry_poller, repo: "hexpm", optional: false]}], "hexpm", "76b074bc3730f0802978a7eb5c7091a65473eaaf07e99ec9e933138dcc327805"},
+ "ranch": {:hex, :ranch, "2.2.0", "25528f82bc8d7c6152c57666ca99ec716510fe0925cb188172f41ce93117b1b0", [:make, :rebar3], [], "hexpm", "fa0b99a1780c80218a4197a59ea8d3bdae32fbff7e88527d7d8a4787eff4f8e7"},
"recon": {:hex, :recon, "2.5.6", "9052588e83bfedfd9b72e1034532aee2a5369d9d9343b61aeb7fbce761010741", [:mix, :rebar3], [], "hexpm", "96c6799792d735cc0f0fd0f86267e9d351e63339cbe03df9d162010cefc26bb0"},
- "req": {:hex, :req, "0.5.10", "a3a063eab8b7510785a467f03d30a8d95f66f5c3d9495be3474b61459c54376c", [:mix], [{:brotli, "~> 0.3.1", [hex: :brotli, repo: "hexpm", optional: true]}, {:ezstd, "~> 1.0", [hex: :ezstd, repo: "hexpm", optional: true]}, {:finch, "~> 0.17", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:mime, "~> 2.0.6 or ~> 2.1", [hex: :mime, repo: "hexpm", optional: false]}, {:nimble_csv, "~> 1.0", [hex: :nimble_csv, repo: "hexpm", optional: true]}, {:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: true]}], "hexpm", "8a604815743f8a2d3b5de0659fa3137fa4b1cffd636ecb69b30b2b9b2c2559be"},
+ "req": {:hex, :req, "0.5.15", "662020efb6ea60b9f0e0fac9be88cd7558b53fe51155a2d9899de594f9906ba9", [:mix], [{:brotli, "~> 0.3.1", [hex: :brotli, repo: "hexpm", optional: true]}, {:ezstd, "~> 1.0", [hex: :ezstd, repo: "hexpm", optional: true]}, {:finch, "~> 0.17", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:mime, "~> 2.0.6 or ~> 2.1", [hex: :mime, repo: "hexpm", optional: false]}, {:nimble_csv, "~> 1.0", [hex: :nimble_csv, repo: "hexpm", optional: true]}, {:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: true]}], "hexpm", "a6513a35fad65467893ced9785457e91693352c70b58bbc045b47e5eb2ef0c53"},
"sleeplocks": {:hex, :sleeplocks, "1.1.3", "96a86460cc33b435c7310dbd27ec82ca2c1f24ae38e34f8edde97f756503441a", [:rebar3], [], "hexpm", "d3b3958552e6eb16f463921e70ae7c767519ef8f5be46d7696cc1ed649421321"},
"snabbkaffe": {:git, "https://github.com/kafka4beam/snabbkaffe", "b59298334ed349556f63405d1353184c63c66534", [tag: "1.0.10"]},
- "sobelow": {:hex, :sobelow, "0.13.0", "218afe9075904793f5c64b8837cc356e493d88fddde126a463839351870b8d1e", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "cd6e9026b85fc35d7529da14f95e85a078d9dd1907a9097b3ba6ac7ebbe34a0d"},
+ "sobelow": {:hex, :sobelow, "0.14.1", "2f81e8632f15574cba2402bcddff5497b413c01e6f094bc0ab94e83c2f74db81", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "8fac9a2bd90fdc4b15d6fca6e1608efb7f7c600fa75800813b794ee9364c87f2"},
"ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.7", "354c321cf377240c7b8716899e182ce4890c5938111a1296add3ec74cf1715df", [:make, :mix, :rebar3], [], "hexpm", "fe4c190e8f37401d30167c8c405eda19469f34577987c76dde613e838bbc67f8"},
- "statistex": {:hex, :statistex, "1.0.0", "f3dc93f3c0c6c92e5f291704cf62b99b553253d7969e9a5fa713e5481cd858a5", [:mix], [], "hexpm", "ff9d8bee7035028ab4742ff52fc80a2aa35cece833cf5319009b52f1b5a86c27"},
+ "statistex": {:hex, :statistex, "1.1.0", "7fec1eb2f580a0d2c1a05ed27396a084ab064a40cfc84246dbfb0c72a5c761e5", [:mix], [], "hexpm", "f5950ea26ad43246ba2cce54324ac394a4e7408fdcf98b8e230f503a0cba9cf5"},
"syn": {:hex, :syn, "3.3.0", "4684a909efdfea35ce75a9662fc523e4a8a4e8169a3df275e4de4fa63f99c486", [:rebar3], [], "hexpm", "e58ee447bc1094bdd21bf0acc102b1fbf99541a508cd48060bf783c245eaf7d6"},
"table_rex": {:hex, :table_rex, "4.1.0", "fbaa8b1ce154c9772012bf445bfb86b587430fb96f3b12022d3f35ee4a68c918", [:mix], [], "hexpm", "95932701df195d43bc2d1c6531178fc8338aa8f38c80f098504d529c43bc2601"},
- "tailwind": {:hex, :tailwind, "0.2.4", "5706ec47182d4e7045901302bf3a333e80f3d1af65c442ba9a9eed152fb26c2e", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}], "hexpm", "c6e4a82b8727bab593700c998a4d98cf3d8025678bfde059aed71d0000c3e463"},
+ "tailwind": {:hex, :tailwind, "0.4.1", "e7bcc222fe96a1e55f948e76d13dd84a1a7653fb051d2a167135db3b4b08d3e9", [:mix], [], "hexpm", "6249d4f9819052911120dbdbe9e532e6bd64ea23476056adb7f730aa25c220d1"},
"telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"},
- "telemetry_metrics": {:hex, :telemetry_metrics, "0.6.2", "2caabe9344ec17eafe5403304771c3539f3b6e2f7fb6a6f602558c825d0d0bfb", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "9b43db0dc33863930b9ef9d27137e78974756f5f198cae18409970ed6fa5b561"},
+ "telemetry_metrics": {:hex, :telemetry_metrics, "1.1.0", "5bd5f3b5637e0abea0426b947e3ce5dd304f8b3bc6617039e2b5a008adc02f8f", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "e7b79e8ddfde70adb6db8a6623d1778ec66401f366e9a8f5dd0955c56bc8ce67"},
"telemetry_metrics_prometheus_core": {:hex, :telemetry_metrics_prometheus_core, "1.2.1", "c9755987d7b959b557084e6990990cb96a50d6482c683fb9622a63837f3cd3d8", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6 or ~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "5e2c599da4983c4f88a33e9571f1458bf98b0cf6ba930f1dc3a6e8cf45d5afb6"},
- "telemetry_poller": {:hex, :telemetry_poller, "1.1.0", "58fa7c216257291caaf8d05678c8d01bd45f4bdbc1286838a28c4bb62ef32999", [:rebar3], [{:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "9eb9d9cbfd81cbd7cdd24682f8711b6e2b691289a0de6826e58452f28c103c8f"},
- "tesla": {:hex, :tesla, "1.13.2", "85afa342eb2ac0fee830cf649dbd19179b6b359bec4710d02a3d5d587f016910", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:exjsx, ">= 3.0.0", [hex: :exjsx, repo: "hexpm", optional: true]}, {:finch, "~> 0.13", [hex: :finch, repo: "hexpm", optional: true]}, {:fuse, "~> 2.4", [hex: :fuse, repo: "hexpm", optional: true]}, {:gun, ">= 1.0.0", [hex: :gun, repo: "hexpm", optional: true]}, {:hackney, "~> 1.6", [hex: :hackney, repo: "hexpm", optional: true]}, {:ibrowse, "4.4.2", [hex: :ibrowse, repo: "hexpm", optional: true]}, {:jason, ">= 1.0.0", [hex: :jason, repo: "hexpm", optional: true]}, {:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.0", [hex: :mint, repo: "hexpm", optional: true]}, {:mox, "~> 1.0", [hex: :mox, repo: "hexpm", optional: true]}, {:msgpax, "~> 2.3", [hex: :msgpax, repo: "hexpm", optional: true]}, {:poison, ">= 1.0.0", [hex: :poison, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: true]}], "hexpm", "960609848f1ef654c3cdfad68453cd84a5febecb6ed9fed9416e36cd9cd724f9"},
- "tls_certificate_check": {:hex, :tls_certificate_check, "1.28.0", "c39bf21f67c2d124ae905454fad00f27e625917e8ab1009146e916e1df6ab275", [:rebar3], [{:ssl_verify_fun, "~> 1.1", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}], "hexpm", "3ab058c3f9457fffca916729587415f0ddc822048a0e5b5e2694918556d92df1"},
+ "telemetry_poller": {:hex, :telemetry_poller, "1.3.0", "d5c46420126b5ac2d72bc6580fb4f537d35e851cc0f8dbd571acf6d6e10f5ec7", [:rebar3], [{:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "51f18bed7128544a50f75897db9974436ea9bfba560420b646af27a9a9b35211"},
+ "tesla": {:hex, :tesla, "1.15.3", "3a2b5c37f09629b8dcf5d028fbafc9143c0099753559d7fe567eaabfbd9b8663", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:exjsx, ">= 3.0.0", [hex: :exjsx, repo: "hexpm", optional: true]}, {:finch, "~> 0.13", [hex: :finch, repo: "hexpm", optional: true]}, {:fuse, "~> 2.4", [hex: :fuse, repo: "hexpm", optional: true]}, {:gun, ">= 1.0.0", [hex: :gun, repo: "hexpm", optional: true]}, {:hackney, "~> 1.21", [hex: :hackney, repo: "hexpm", optional: true]}, {:ibrowse, "4.4.2", [hex: :ibrowse, repo: "hexpm", optional: true]}, {:jason, ">= 1.0.0", [hex: :jason, repo: "hexpm", optional: true]}, {:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.0", [hex: :mint, repo: "hexpm", optional: true]}, {:mox, "~> 1.0", [hex: :mox, repo: "hexpm", optional: true]}, {:msgpax, "~> 2.3", [hex: :msgpax, repo: "hexpm", optional: true]}, {:poison, ">= 1.0.0", [hex: :poison, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: true]}], "hexpm", "98bb3d4558abc67b92fb7be4cd31bb57ca8d80792de26870d362974b58caeda7"},
+ "tls_certificate_check": {:hex, :tls_certificate_check, "1.29.0", "4473005eb0bbdad215d7083a230e2e076f538d9ea472c8009fd22006a4cfc5f6", [:rebar3], [{:ssl_verify_fun, "~> 1.1", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}], "hexpm", "5b0d0e5cb0f928bc4f210df667304ed91c5bff2a391ce6bdedfbfe70a8f096c5"},
"typed_struct": {:hex, :typed_struct, "0.3.0", "939789e3c1dca39d7170c87f729127469d1315dcf99fee8e152bb774b17e7ff7", [:mix], [], "hexpm", "c50bd5c3a61fe4e198a8504f939be3d3c85903b382bde4865579bc23111d1b6d"},
"unsafe": {:hex, :unsafe, "1.0.2", "23c6be12f6c1605364801f4b47007c0c159497d0446ad378b5cf05f1855c0581", [:mix], [], "hexpm", "b485231683c3ab01a9cd44cb4a79f152c6f3bb87358439c6f68791b85c2df675"},
"uuid": {:hex, :uuid, "1.1.8", "e22fc04499de0de3ed1116b770c7737779f226ceefa0badb3592e64d5cfb4eb9", [:mix], [], "hexpm", "c790593b4c3b601f5dc2378baae7efaf5b3d73c4c6456ba85759905be792f2ac"},
"websock": {:hex, :websock, "0.5.3", "2f69a6ebe810328555b6fe5c831a851f485e303a7c8ce6c5f675abeb20ebdadc", [:mix], [], "hexpm", "6105453d7fac22c712ad66fab1d45abdf049868f253cf719b625151460b8b453"},
"websock_adapter": {:hex, :websock_adapter, "0.5.8", "3b97dc94e407e2d1fc666b2fb9acf6be81a1798a2602294aac000260a7c4a47d", [:mix], [{:bandit, ">= 0.6.0", [hex: :bandit, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.6", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "315b9a1865552212b5f35140ad194e67ce31af45bcee443d4ecb96b5fd3f3782"},
"yamerl": {:hex, :yamerl, "0.10.0", "4ff81fee2f1f6a46f1700c0d880b24d193ddb74bd14ef42cb0bcf46e81ef2f8e", [:rebar3], [], "hexpm", "346adb2963f1051dc837a2364e4acf6eb7d80097c0f53cbdc3046ec8ec4b4e6e"},
- "yaml_elixir": {:hex, :yaml_elixir, "2.11.0", "9e9ccd134e861c66b84825a3542a1c22ba33f338d82c07282f4f1f52d847bd50", [:mix], [{:yamerl, "~> 0.10", [hex: :yamerl, repo: "hexpm", optional: false]}], "hexpm", "53cc28357ee7eb952344995787f4bb8cc3cecbf189652236e9b163e8ce1bc242"},
+ "yaml_elixir": {:hex, :yaml_elixir, "2.12.0", "30343ff5018637a64b1b7de1ed2a3ca03bc641410c1f311a4dbdc1ffbbf449c7", [:mix], [{:yamerl, "~> 0.10", [hex: :yamerl, repo: "hexpm", optional: false]}], "hexpm", "ca6bacae7bac917a7155dca0ab6149088aa7bc800c94d0fe18c5238f53b313c6"},
}
diff --git a/priv/repo/dev_seeds.exs b/priv/repo/dev_seeds.exs
index 7dec7895a..7767edec0 100644
--- a/priv/repo/dev_seeds.exs
+++ b/priv/repo/dev_seeds.exs
@@ -1,5 +1,3 @@
-import Ecto.Adapters.SQL, only: [query!: 3]
-
alias Realtime.Api.Tenant
alias Realtime.Database
alias Realtime.Repo
@@ -41,19 +39,6 @@ default_db_host = "127.0.0.1"
})
|> Repo.insert!()
- publication = "supabase_realtime"
-
- [
- "drop publication if exists #{publication}",
- "drop table if exists public.test_tenant;",
- "create table public.test_tenant ( id SERIAL PRIMARY KEY, details text );",
- "grant all on table public.test_tenant to anon;",
- "grant all on table public.test_tenant to postgres;",
- "grant all on table public.test_tenant to authenticated;",
- "create publication #{publication} for table public.test_tenant"
- ]
- |> Enum.each(&query!(Repo, &1, []))
-
tenant
end)
@@ -61,10 +46,22 @@ default_db_host = "127.0.0.1"
settings = Database.from_tenant(tenant, "realtime_migrations", :stop)
settings = %{settings | max_restarts: 0, ssl: false}
{:ok, tenant_conn} = Database.connect_db(settings)
+publication = "supabase_realtime"
Postgrex.transaction(tenant_conn, fn db_conn ->
Postgrex.query!(db_conn, "DROP SCHEMA IF EXISTS realtime CASCADE", [])
Postgrex.query!(db_conn, "CREATE SCHEMA IF NOT EXISTS realtime", [])
+
+ [
+ "drop publication if exists #{publication}",
+ "drop table if exists public.test_tenant;",
+ "create table public.test_tenant ( id SERIAL PRIMARY KEY, details text );",
+ "grant all on table public.test_tenant to anon;",
+ "grant all on table public.test_tenant to postgres;",
+ "grant all on table public.test_tenant to authenticated;",
+ "create publication #{publication} for table public.test_tenant"
+ ]
+ |> Enum.each(&Postgrex.query!(db_conn, &1))
end)
case Tenants.Migrations.run_migrations(tenant) do
diff --git a/priv/repo/migrations/20250926223044_set_default_presence_value.exs b/priv/repo/migrations/20250926223044_set_default_presence_value.exs
new file mode 100644
index 000000000..5f1833a34
--- /dev/null
+++ b/priv/repo/migrations/20250926223044_set_default_presence_value.exs
@@ -0,0 +1,10 @@
+defmodule Realtime.Repo.Migrations.SetDefaultPresenceValue do
+ use Ecto.Migration
+ @disable_ddl_transaction true
+ @disable_migration_lock true
+ def change do
+ alter table(:tenants) do
+ modify :max_presence_events_per_second, :integer, default: 1000
+ end
+ end
+end
diff --git a/priv/repo/migrations/20251204170944_nullable_jwt_secrets.exs b/priv/repo/migrations/20251204170944_nullable_jwt_secrets.exs
new file mode 100644
index 000000000..342a80ad9
--- /dev/null
+++ b/priv/repo/migrations/20251204170944_nullable_jwt_secrets.exs
@@ -0,0 +1,13 @@
+defmodule Realtime.Repo.Migrations.NullableJwtSecrets do
+ use Ecto.Migration
+
+ def change do
+ alter table(:tenants) do
+ modify :jwt_secret, :text, null: true
+ end
+
+ create constraint(:tenants, :jwt_secret_or_jwt_jwks_required,
+ check: "jwt_secret IS NOT NULL OR jwt_jwks IS NOT NULL"
+ )
+ end
+end
diff --git a/priv/repo/migrations/20251218000543_ensure_jwt_secret_is_text.exs b/priv/repo/migrations/20251218000543_ensure_jwt_secret_is_text.exs
new file mode 100644
index 000000000..008c9d7db
--- /dev/null
+++ b/priv/repo/migrations/20251218000543_ensure_jwt_secret_is_text.exs
@@ -0,0 +1,9 @@
+defmodule Realtime.Repo.Migrations.EnsureJwtSecretIsText do
+ use Ecto.Migration
+
+ def change do
+ alter table(:tenants) do
+ modify :jwt_secret, :text, null: true
+ end
+ end
+end
diff --git a/rel/vm.args.eex b/rel/vm.args.eex
index 278da5524..983e240c4 100644
--- a/rel/vm.args.eex
+++ b/rel/vm.args.eex
@@ -10,8 +10,8 @@
## Tweak GC to run more often
##-env ERL_FULLSWEEP_AFTER 10
-## Limit process heap for all procs to 1000 MB
-+hmax 1000000000
+## Limit process heap for all procs to 2500 MB. The number here is the number of words
++hmax <%= div(2_500_000_000, :erlang.system_info(:wordsize)) %>
## Set distribution buffer busy limit (default is 1024)
+zdbbl 100000
@@ -19,4 +19,4 @@
## Disable Busy Wait
+sbwt none
+sbwtdio none
-+sbwtdcpu none
\ No newline at end of file
++sbwtdcpu none
diff --git a/run.sh b/run.sh
index 2dddbc1b8..66585dc2b 100755
--- a/run.sh
+++ b/run.sh
@@ -90,7 +90,7 @@ if [ "${ENABLE_ERL_CRASH_DUMP:-false}" = true ]; then
trap upload_crash_dump_to_s3 INT TERM KILL EXIT
fi
-if [[ -n "${GENERATE_CLUSTER_CERTS}" ]] ; then
+if [[ -n "${GENERATE_CLUSTER_CERTS:-}" ]] ; then
generate_certs
fi
diff --git a/test/e2e/tests.ts b/test/e2e/tests.ts
index 2711a959e..4193b06c2 100644
--- a/test/e2e/tests.ts
+++ b/test/e2e/tests.ts
@@ -1,8 +1,5 @@
import { load } from "https://deno.land/std@0.224.0/dotenv/mod.ts";
-import {
- createClient,
- SupabaseClient,
-} from "npm:@supabase/supabase-js@2.49.5-next.5";
+import { createClient, SupabaseClient } from "npm:@supabase/supabase-js@latest";
import { assertEquals } from "https://deno.land/std@0.224.0/assert/mod.ts";
import {
describe,
@@ -69,11 +66,7 @@ describe("broadcast extension", () => {
while (activeChannel.state == "joining") await sleep(0.2);
// Send from unsubscribed channel
- supabase.channel(topic, config).send({
- type: "broadcast",
- event,
- payload: expectedPayload,
- });
+ supabase.channel(topic, config).httpSend(event, expectedPayload);
while (result == null) await sleep(0.2);
diff --git a/test/integration/distributed_realtime_channel_test.exs b/test/integration/distributed_realtime_channel_test.exs
new file mode 100644
index 000000000..54411d414
--- /dev/null
+++ b/test/integration/distributed_realtime_channel_test.exs
@@ -0,0 +1,61 @@
+defmodule Realtime.Integration.DistributedRealtimeChannelTest do
+ # Use of Clustered
+ use RealtimeWeb.ConnCase,
+ async: false,
+ parameterize: [%{serializer: Phoenix.Socket.V1.JSONSerializer}, %{serializer: RealtimeWeb.Socket.V2Serializer}]
+
+ alias Phoenix.Socket.Message
+
+ alias Realtime.Tenants.Connect
+ alias Realtime.Integration.WebsocketClient
+
+ setup do
+ tenant = Realtime.Api.get_tenant_by_external_id("dev_tenant")
+
+ RateCounterHelper.stop(tenant.external_id)
+
+ Connect.shutdown(tenant.external_id)
+ # Sleeping so that syn can forget about this Connect process
+ Process.sleep(100)
+
+ on_exit(fn ->
+ Connect.shutdown(tenant.external_id)
+ # Sleeping so that syn can forget about this Connect process
+ Process.sleep(100)
+ end)
+
+ on_exit(fn -> Connect.shutdown(tenant.external_id) end)
+ {:ok, node} = Clustered.start()
+ region = Realtime.Tenants.region(tenant)
+ {:ok, db_conn} = :erpc.call(node, Connect, :connect, ["dev_tenant", region])
+ assert Connect.ready?(tenant.external_id)
+
+ assert node(db_conn) == node
+ %{tenant: tenant, topic: random_string()}
+ end
+
+ describe "distributed broadcast" do
+ @tag mode: :distributed
+ test "it works", %{tenant: tenant, topic: topic, serializer: serializer} do
+ {:ok, token} =
+ generate_token(tenant, %{exp: System.system_time(:second) + 1000, role: "authenticated", sub: random_string()})
+
+ {:ok, remote_socket} =
+ WebsocketClient.connect(self(), uri(tenant, serializer, 4012), serializer, [{"x-api-key", token}])
+
+ {:ok, socket} = WebsocketClient.connect(self(), uri(tenant, serializer), serializer, [{"x-api-key", token}])
+
+ config = %{broadcast: %{self: false}, private: false}
+ topic = "realtime:#{topic}"
+
+ :ok = WebsocketClient.join(remote_socket, topic, %{config: config})
+ :ok = WebsocketClient.join(socket, topic, %{config: config})
+
+ # Send through one socket and receive through the other (self: false)
+ payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
+ :ok = WebsocketClient.send_event(remote_socket, topic, "broadcast", payload)
+
+ assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}, 2000
+ end
+ end
+end
diff --git a/test/integration/measure_traffic_test.exs b/test/integration/measure_traffic_test.exs
new file mode 100644
index 000000000..5a560536a
--- /dev/null
+++ b/test/integration/measure_traffic_test.exs
@@ -0,0 +1,233 @@
+defmodule Realtime.Integration.MeasureTrafficTest do
+ use RealtimeWeb.ConnCase, async: false
+
+ alias Phoenix.Socket.Message
+ alias Realtime.Integration.WebsocketClient
+
+ setup do
+ tenant = Containers.checkout_tenant(run_migrations: true)
+
+ {:ok, db_conn} = Realtime.Tenants.Connect.lookup_or_start_connection(tenant.external_id)
+ assert Realtime.Tenants.Connect.ready?(tenant.external_id)
+ %{db_conn: db_conn, tenant: tenant}
+ end
+
+ def handle_telemetry(event, measurements, metadata, name) do
+ tenant = metadata[:tenant]
+ [key] = Enum.take(event, -1)
+ value = Map.get(measurements, :sum) || Map.get(measurements, :value) || Map.get(measurements, :size) || 0
+
+ Agent.update(name, fn state ->
+ state =
+ Map.put_new(
+ state,
+ tenant,
+ %{
+ joins: 0,
+ events: 0,
+ db_events: 0,
+ presence_events: 0,
+ output_bytes: 0,
+ input_bytes: 0
+ }
+ )
+
+ update_in(state, [metadata[:tenant], key], fn v -> (v || 0) + value end)
+ end)
+ end
+
+ defp get_count(event, tenant) do
+ [key] = Enum.take(event, -1)
+
+ :"TestCounter_#{tenant}"
+ |> Agent.get(fn state -> get_in(state, [tenant, key]) || 0 end)
+ end
+
+ describe "measure traffic" do
+ setup %{tenant: tenant} do
+ events = [
+ [:realtime, :channel, :output_bytes],
+ [:realtime, :channel, :input_bytes]
+ ]
+
+ name = :"TestCounter_#{tenant.external_id}"
+
+ {:ok, _} =
+ start_supervised(%{
+ id: 1,
+ start: {Agent, :start_link, [fn -> %{} end, [name: name]]}
+ })
+
+ RateCounterHelper.stop(tenant.external_id)
+ on_exit(fn -> :telemetry.detach({__MODULE__, tenant.external_id}) end)
+ :telemetry.attach_many({__MODULE__, tenant.external_id}, events, &__MODULE__.handle_telemetry/4, name)
+
+ measure_traffic_interval_in_ms = Application.get_env(:realtime, :measure_traffic_interval_in_ms)
+ Application.put_env(:realtime, :measure_traffic_interval_in_ms, 10)
+ on_exit(fn -> Application.put_env(:realtime, :measure_traffic_interval_in_ms, measure_traffic_interval_in_ms) end)
+
+ :ok
+ end
+
+ test "measure traffic for broadcast events", %{tenant: tenant} do
+ {socket, _} = get_connection(tenant)
+ config = %{broadcast: %{self: true}}
+ topic = "realtime:any"
+
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ # Wait for join to complete
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 1000
+ assert_receive %Message{topic: ^topic, event: "presence_state"}, 1000
+
+ for _ <- 1..5 do
+ WebsocketClient.send_event(socket, topic, "broadcast", %{
+ "event" => "TEST",
+ "payload" => %{"msg" => 1},
+ "type" => "broadcast"
+ })
+
+ assert_receive %Message{
+ event: "broadcast",
+ payload: %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"},
+ topic: ^topic
+ },
+ 500
+ end
+
+ # Wait for RateCounter to run
+ RateCounterHelper.tick_tenant_rate_counters!(tenant.external_id)
+ Process.sleep(100)
+
+ output_bytes = get_count([:realtime, :channel, :output_bytes], tenant.external_id)
+ input_bytes = get_count([:realtime, :channel, :input_bytes], tenant.external_id)
+
+ assert output_bytes > 0
+ assert input_bytes > 0
+ end
+
+ test "measure traffic for presence events", %{tenant: tenant} do
+ {socket, _} = get_connection(tenant)
+ config = %{broadcast: %{self: true}, presence: %{enabled: true}}
+ topic = "realtime:any"
+
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ # Wait for join to complete
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 1000
+ assert_receive %Message{topic: ^topic, event: "presence_state"}, 1000
+
+ for _ <- 1..5 do
+ WebsocketClient.send_event(socket, topic, "presence", %{
+ "event" => "TRACK",
+ "payload" => %{name: "realtime_presence_#{:rand.uniform(1000)}", t: 1814.7000000029802},
+ "type" => "presence"
+ })
+ end
+
+ # Wait for RateCounter to run
+ RateCounterHelper.tick_tenant_rate_counters!(tenant.external_id)
+ Process.sleep(100)
+
+ output_bytes = get_count([:realtime, :channel, :output_bytes], tenant.external_id)
+ input_bytes = get_count([:realtime, :channel, :input_bytes], tenant.external_id)
+
+ assert output_bytes > 0, "Expected output_bytes to be greater than 0, got #{output_bytes}"
+ assert input_bytes > 0, "Expected input_bytes to be greater than 0, got #{input_bytes}"
+ end
+
+ test "measure traffic for postgres changes events", %{tenant: tenant, db_conn: db_conn} do
+ {socket, _} = get_connection(tenant)
+ config = %{broadcast: %{self: true}, postgres_changes: [%{event: "*", schema: "public"}]}
+ topic = "realtime:any"
+
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ # Wait for join to complete
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 1000
+ assert_receive %Message{topic: ^topic, event: "presence_state"}, 1000
+
+ # Wait for postgres_changes subscription to be ready
+ assert_receive %Message{
+ event: "system",
+ payload: %{
+ "channel" => "any",
+ "extension" => "postgres_changes",
+ "status" => "ok"
+ },
+ topic: ^topic
+ },
+ 8000
+
+ for _ <- 1..5 do
+ Postgrex.query!(db_conn, "INSERT INTO test (details) VALUES ($1)", [random_string()])
+ end
+
+ for _ <- 1..5 do
+ assert_receive %Message{
+ event: "postgres_changes",
+ payload: %{"data" => %{"schema" => "public", "table" => "test", "type" => "INSERT"}},
+ topic: ^topic
+ },
+ 500
+ end
+
+ # Wait for RateCounter to run
+ RateCounterHelper.tick_tenant_rate_counters!(tenant.external_id)
+ Process.sleep(100)
+
+ output_bytes = get_count([:realtime, :channel, :output_bytes], tenant.external_id)
+ input_bytes = get_count([:realtime, :channel, :input_bytes], tenant.external_id)
+
+ assert output_bytes > 0, "Expected output_bytes to be greater than 0, got #{output_bytes}"
+ assert input_bytes > 0, "Expected input_bytes to be greater than 0, got #{input_bytes}"
+ end
+
+ test "measure traffic for db events", %{tenant: tenant, db_conn: db_conn} do
+ {socket, _} = get_connection(tenant)
+ config = %{broadcast: %{self: true}, db: %{enabled: true}}
+ topic = "realtime:any"
+ channel_name = "any"
+
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ # Wait for join to complete
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 1000
+ assert_receive %Message{topic: ^topic, event: "presence_state"}, 1000
+
+ for _ <- 1..5 do
+ event = random_string()
+ value = random_string()
+
+ Postgrex.query!(
+ db_conn,
+ "SELECT realtime.send (json_build_object ('value', $1 :: text)::jsonb, $2 :: text, $3 :: text, FALSE::bool);",
+ [value, event, channel_name]
+ )
+
+ assert_receive %Message{
+ event: "broadcast",
+ payload: %{
+ "event" => ^event,
+ "payload" => %{"value" => ^value},
+ "type" => "broadcast"
+ },
+ topic: ^topic,
+ join_ref: nil,
+ ref: nil
+ },
+ 1000
+ end
+
+ # Wait for RateCounter to run
+ RateCounterHelper.tick_tenant_rate_counters!(tenant.external_id)
+ Process.sleep(100)
+
+ output_bytes = get_count([:realtime, :channel, :output_bytes], tenant.external_id)
+ input_bytes = get_count([:realtime, :channel, :input_bytes], tenant.external_id)
+
+ assert output_bytes > 0, "Expected output_bytes to be greater than 0, got #{output_bytes}"
+ assert input_bytes > 0, "Expected input_bytes to be greater than 0, got #{input_bytes}"
+ end
+ end
+end
diff --git a/test/integration/region_aware_migrations_test.exs b/test/integration/region_aware_migrations_test.exs
new file mode 100644
index 000000000..892ed2382
--- /dev/null
+++ b/test/integration/region_aware_migrations_test.exs
@@ -0,0 +1,70 @@
+defmodule Realtime.Integration.RegionAwareMigrationsTest do
+ use Realtime.DataCase, async: false
+ use Mimic
+
+ alias Containers
+ alias Realtime.Tenants
+ alias Realtime.Tenants.Migrations
+
+ setup do
+ {:ok, port} = Containers.checkout()
+
+ settings = [
+ %{
+ "type" => "postgres_cdc_rls",
+ "settings" => %{
+ "db_host" => "127.0.0.1",
+ "db_name" => "postgres",
+ "db_user" => "supabase_admin",
+ "db_password" => "postgres",
+ "db_port" => "#{port}",
+ "poll_interval" => 100,
+ "poll_max_changes" => 100,
+ "poll_max_record_bytes" => 1_048_576,
+ "region" => "ap-southeast-2",
+ "publication" => "supabase_realtime_test",
+ "ssl_enforced" => false
+ }
+ }
+ ]
+
+ tenant = tenant_fixture(%{extensions: settings})
+ region = Application.get_env(:realtime, :region)
+
+ {:ok, node} =
+ Clustered.start(nil,
+ extra_config: [
+ {:realtime, :region, Tenants.region(tenant)},
+ {:realtime, :master_region, region}
+ ]
+ )
+
+ Process.sleep(100)
+
+ %{tenant: tenant, node: node}
+ end
+
+ test "run_migrations routes to node in tenant's region with expected arguments", %{tenant: tenant, node: node} do
+ assert tenant.migrations_ran == 0
+
+ Realtime.GenRpc
+ |> Mimic.expect(:call, fn called_node, mod, func, args, opts ->
+ assert called_node == node
+ assert mod == Migrations
+ assert func == :start_migration
+ assert opts[:tenant_id] == tenant.external_id
+
+ arg = hd(args)
+ assert arg.tenant_external_id == tenant.external_id
+ assert arg.migrations_ran == tenant.migrations_ran
+ assert arg.settings == hd(tenant.extensions).settings
+
+ call_original(Realtime.GenRpc, :call, [node, mod, func, args, opts])
+ end)
+
+ assert :ok = Migrations.run_migrations(tenant)
+ Process.sleep(1000)
+ tenant = Realtime.Repo.reload!(tenant)
+ refute tenant.migrations_ran == 0
+ end
+end
diff --git a/test/integration/region_aware_routing_test.exs b/test/integration/region_aware_routing_test.exs
new file mode 100644
index 000000000..f9f5178f2
--- /dev/null
+++ b/test/integration/region_aware_routing_test.exs
@@ -0,0 +1,232 @@
+defmodule Realtime.Integration.RegionAwareRoutingTest do
+ use Realtime.DataCase, async: false
+ use Mimic
+
+ alias Realtime.Api
+ alias Realtime.Api.Tenant
+ alias Realtime.GenRpc
+ alias Realtime.Nodes
+
+ setup do
+ # Configure test runner as non-master region (eu-west-1) with master_region = us-east-1
+ original_master_region = Application.get_env(:realtime, :master_region)
+
+ Application.put_env(:realtime, :master_region, "eu-west-2")
+
+ # Start peer node as master region (us-east-1)
+ # The master node will automatically register itself in RegionNodes on startup
+ {:ok, master_node} =
+ Clustered.start(nil,
+ extra_config: [
+ {:realtime, :region, "eu-west-2"},
+ {:realtime, :master_region, "eu-west-2"}
+ ]
+ )
+
+ Process.sleep(100)
+
+ on_exit(fn ->
+ Application.put_env(:realtime, :master_region, original_master_region)
+ Clustered.stop()
+ end)
+
+ %{master_node: master_node}
+ end
+
+ test "create_tenant automatically routes to master region", %{master_node: master_node} do
+ external_id = "test_routing_#{System.unique_integer([:positive])}"
+
+ attrs = %{
+ "external_id" => external_id,
+ "name" => external_id,
+ "jwt_secret" => "secret",
+ "public_key" => "public",
+ "extensions" => [],
+ "postgres_cdc_default" => "postgres_cdc_rls",
+ "max_concurrent_users" => 200,
+ "max_events_per_second" => 100
+ }
+
+ Mimic.expect(Realtime.GenRpc, :call, fn node, mod, func, args, opts ->
+ assert node == master_node
+ assert mod == Realtime.Api
+ assert func == :create_tenant
+ assert opts[:tenant_id] == external_id
+
+ call_original(GenRpc, :call, [node, mod, func, args, opts])
+ end)
+
+ result = Api.create_tenant(attrs)
+
+ assert {:ok, %Tenant{} = tenant} = result
+ assert tenant.external_id == external_id
+
+ assert Realtime.Repo.get_by(Tenant, external_id: external_id)
+ end
+
+ test "update_tenant automatically routes to master region", %{master_node: master_node} do
+ # Create tenant on master node first
+ tenant_attrs = %{
+ "external_id" => "test_update_#{System.unique_integer([:positive])}",
+ "name" => "original",
+ "jwt_secret" => "secret",
+ "public_key" => "public",
+ "extensions" => [],
+ "postgres_cdc_default" => "postgres_cdc_rls",
+ "max_concurrent_users" => 200,
+ "max_events_per_second" => 100
+ }
+
+ Realtime.GenRpc
+ |> Mimic.expect(:call, fn node, mod, func, args, opts ->
+ assert node == master_node
+ assert mod == Realtime.Api
+ assert func == :create_tenant
+ assert opts[:tenant_id] == tenant_attrs["external_id"]
+
+ call_original(GenRpc, :call, [node, mod, func, args, opts])
+ end)
+ |> Mimic.expect(:call, fn node, mod, func, args, opts ->
+ assert node == master_node
+ assert mod == Realtime.Api
+ assert func == :update_tenant_by_external_id
+ assert opts[:tenant_id] == tenant_attrs["external_id"]
+
+ call_original(GenRpc, :call, [node, mod, func, args, opts])
+ end)
+
+ tenant = tenant_fixture(tenant_attrs)
+
+ new_name = "updated_via_routing"
+ result = Api.update_tenant_by_external_id(tenant.external_id, %{name: new_name})
+
+ assert {:ok, %Tenant{} = updated} = result
+ assert updated.name == new_name
+
+ reloaded = Realtime.Repo.get(Tenant, tenant.id)
+ assert reloaded.name == new_name
+ end
+
+ test "delete_tenant_by_external_id automatically routes to master region", %{master_node: master_node} do
+ # Create tenant on master node first
+ tenant_attrs = %{
+ "external_id" => "test_delete_#{System.unique_integer([:positive])}",
+ "name" => "to_delete",
+ "jwt_secret" => "secret",
+ "public_key" => "public",
+ "extensions" => [],
+ "postgres_cdc_default" => "postgres_cdc_rls",
+ "max_concurrent_users" => 200,
+ "max_events_per_second" => 100
+ }
+
+ Realtime.GenRpc
+ |> Mimic.expect(:call, fn node, mod, func, args, opts ->
+ assert node == master_node
+ assert mod == Realtime.Api
+ assert func == :create_tenant
+ assert opts[:tenant_id] == tenant_attrs["external_id"]
+
+ call_original(GenRpc, :call, [node, mod, func, args, opts])
+ end)
+ |> Mimic.expect(:call, fn node, mod, func, args, opts ->
+ assert node == master_node
+ assert mod == Realtime.Api
+ assert func == :delete_tenant_by_external_id
+ assert opts[:tenant_id] == tenant_attrs["external_id"]
+
+ call_original(GenRpc, :call, [node, mod, func, args, opts])
+ end)
+
+ tenant = tenant_fixture(tenant_attrs)
+
+ result = Api.delete_tenant_by_external_id(tenant.external_id)
+
+ assert result == true
+
+ refute Realtime.Repo.get(Tenant, tenant.id)
+ end
+
+ test "update_migrations_ran automatically routes to master region", %{master_node: master_node} do
+ # Create tenant on master node first
+ tenant_attrs = %{
+ "external_id" => "test_migrations_#{System.unique_integer([:positive])}",
+ "name" => "migrations_test",
+ "jwt_secret" => "secret",
+ "public_key" => "public",
+ "extensions" => [],
+ "postgres_cdc_default" => "postgres_cdc_rls",
+ "max_concurrent_users" => 200,
+ "max_events_per_second" => 100,
+ "migrations_ran" => 0
+ }
+
+ Realtime.GenRpc
+ |> Mimic.expect(:call, fn node, mod, func, args, opts ->
+ assert node == master_node
+ assert mod == Realtime.Api
+ assert func == :create_tenant
+ assert opts[:tenant_id] == tenant_attrs["external_id"]
+
+ call_original(GenRpc, :call, [node, mod, func, args, opts])
+ end)
+ |> Mimic.expect(:call, fn node, mod, func, args, opts ->
+ assert node == master_node
+ assert mod == Realtime.Api
+ assert func == :update_migrations_ran
+ assert opts[:tenant_id] == tenant_attrs["external_id"]
+
+ call_original(GenRpc, :call, [node, mod, func, args, opts])
+ end)
+
+ tenant = tenant_fixture(tenant_attrs)
+
+ new_migrations_ran = 5
+ result = Api.update_migrations_ran(tenant.external_id, new_migrations_ran)
+
+ assert {:ok, updated} = result
+ assert updated.migrations_ran == new_migrations_ran
+
+ reloaded = Realtime.Repo.get(Tenant, tenant.id)
+ assert reloaded.migrations_ran == new_migrations_ran
+ end
+
+ test "returns error when Nodes.node_from_region returns {:error, :not_available}" do
+ external_id = "test_error_node_unavailable_#{System.unique_integer([:positive])}"
+
+ attrs = %{
+ "external_id" => external_id,
+ "name" => external_id,
+ "jwt_secret" => "secret",
+ "public_key" => "public",
+ "extensions" => [],
+ "postgres_cdc_default" => "postgres_cdc_rls",
+ "max_concurrent_users" => 200,
+ "max_events_per_second" => 100
+ }
+
+ Mimic.expect(Nodes, :node_from_region, fn _region, _key -> {:error, :not_available} end)
+ result = Api.create_tenant(attrs)
+ assert {:error, :not_available} = result
+ end
+
+ test "returns error when GenRpc.call returns {:error, :rpc_error, reason}" do
+ external_id = "test_error_rpc_error_#{System.unique_integer([:positive])}"
+ rpc_error_reason = :timeout
+
+ attrs = %{
+ "external_id" => external_id,
+ "name" => external_id,
+ "jwt_secret" => "secret",
+ "public_key" => "public",
+ "extensions" => [],
+ "postgres_cdc_default" => "postgres_cdc_rls",
+ "max_concurrent_users" => 200,
+ "max_events_per_second" => 100
+ }
+
+ Mimic.expect(GenRpc, :call, fn _node, _mod, _func, _args, _opts -> {:error, :rpc_error, rpc_error_reason} end)
+ result = Api.create_tenant(attrs)
+ assert {:error, ^rpc_error_reason} = result
+ end
+end
diff --git a/test/integration/rt_channel_test.exs b/test/integration/rt_channel_test.exs
index 806a5ad7e..c4160e4e4 100644
--- a/test/integration/rt_channel_test.exs
+++ b/test/integration/rt_channel_test.exs
@@ -1,83 +1,36 @@
defmodule Realtime.Integration.RtChannelTest do
- # async: false due to the fact that multiple operations against the same tenant and usage of mocks
- # Also using dev_tenant due to distributed test
- alias Realtime.Api
- use RealtimeWeb.ConnCase, async: false
- use Mimic
+ use RealtimeWeb.ConnCase,
+ async: true,
+ parameterize: [%{serializer: Phoenix.Socket.V1.JSONSerializer}, %{serializer: RealtimeWeb.Socket.V2Serializer}]
+
import ExUnit.CaptureLog
import Generators
- setup :set_mimic_global
-
require Logger
alias Extensions.PostgresCdcRls
-
alias Phoenix.Socket.Message
- alias Phoenix.Socket.V1
-
alias Postgrex
-
alias Realtime.Api.Tenant
alias Realtime.Database
alias Realtime.Integration.WebsocketClient
- alias Realtime.RateCounter
alias Realtime.Tenants
- alias Realtime.Tenants.Authorization
alias Realtime.Tenants.Connect
-
- alias RealtimeWeb.RealtimeChannel.Tracker
+ alias Realtime.Tenants.ReplicationConnection
alias RealtimeWeb.SocketDisconnect
@moduletag :capture_log
- @port 4003
- @serializer V1.JSONSerializer
-
- Application.put_env(:phoenix, TestEndpoint,
- https: false,
- http: [port: @port],
- debug_errors: false,
- server: true,
- pubsub_server: __MODULE__,
- secret_key_base: String.duplicate("a", 64)
- )
-
- setup_all do
- capture_log(fn -> start_supervised!(TestEndpoint) end)
- start_supervised!({Phoenix.PubSub, name: __MODULE__})
- :ok
- end
- setup [:mode]
-
- describe "postgres changes" do
- setup %{tenant: tenant} do
- {:ok, conn} = Database.connect(tenant, "realtime_test")
-
- Database.transaction(conn, fn db_conn ->
- queries = [
- "drop table if exists public.test",
- "drop publication if exists supabase_realtime_test",
- "create sequence if not exists test_id_seq;",
- """
- create table if not exists "public"."test" (
- "id" int4 not null default nextval('test_id_seq'::regclass),
- "details" text,
- primary key ("id"));
- """,
- "grant all on table public.test to anon;",
- "grant all on table public.test to postgres;",
- "grant all on table public.test to authenticated;",
- "create publication supabase_realtime_test for all tables"
- ]
-
- Enum.each(queries, &Postgrex.query!(db_conn, &1, []))
- end)
+ setup do
+ tenant = Containers.checkout_tenant(run_migrations: true)
- :ok
- end
+ {:ok, db_conn} = Connect.lookup_or_start_connection(tenant.external_id)
+ assert Connect.ready?(tenant.external_id)
+ %{db_conn: db_conn, tenant: tenant}
+ end
- test "error subscribing", %{tenant: tenant} do
+ describe "postgres changes" do
+ test "error subscribing", %{tenant: tenant, serializer: serializer} do
{:ok, conn} = Database.connect(tenant, "realtime_test")
# Let's drop the publication to cause an error
@@ -85,7 +38,7 @@ defmodule Realtime.Integration.RtChannelTest do
Postgrex.query!(db_conn, "drop publication if exists supabase_realtime_test")
end)
- {socket, _} = get_connection(tenant)
+ {socket, _} = get_connection(tenant, serializer)
topic = "realtime:any"
config = %{postgres_changes: [%{event: "INSERT", schema: "public"}]}
@@ -99,7 +52,7 @@ defmodule Realtime.Integration.RtChannelTest do
"channel" => "any",
"extension" => "postgres_changes",
"message" =>
- "{:error, \"Unable to subscribe to changes with given parameters. Please check Realtime is enabled for the given connect parameters: [event: INSERT, schema: public]\"}",
+ "Unable to subscribe to changes with given parameters. Please check Realtime is enabled for the given connect parameters: [schema: public, table: *, filters: []]",
"status" => "error"
},
ref: nil,
@@ -112,8 +65,8 @@ defmodule Realtime.Integration.RtChannelTest do
assert log =~ "Unable to subscribe to changes with given parameters"
end
- test "handle insert", %{tenant: tenant} do
- {socket, _} = get_connection(tenant)
+ test "handle insert", %{tenant: tenant, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer)
topic = "realtime:any"
config = %{postgres_changes: [%{event: "INSERT", schema: "public"}]}
@@ -175,8 +128,8 @@ defmodule Realtime.Integration.RtChannelTest do
500
end
- test "handle update", %{tenant: tenant} do
- {socket, _} = get_connection(tenant)
+ test "handle update", %{tenant: tenant, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer)
topic = "realtime:any"
config = %{postgres_changes: [%{event: "UPDATE", schema: "public"}]}
@@ -242,8 +195,8 @@ defmodule Realtime.Integration.RtChannelTest do
500
end
- test "handle delete", %{tenant: tenant} do
- {socket, _} = get_connection(tenant)
+ test "handle delete", %{tenant: tenant, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer)
topic = "realtime:any"
config = %{postgres_changes: [%{event: "DELETE", schema: "public"}]}
@@ -307,8 +260,8 @@ defmodule Realtime.Integration.RtChannelTest do
500
end
- test "handle wildcard", %{tenant: tenant} do
- {socket, _} = get_connection(tenant)
+ test "handle wildcard", %{tenant: tenant, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer)
topic = "realtime:any"
config = %{postgres_changes: [%{event: "*", schema: "public"}]}
@@ -420,8 +373,8 @@ defmodule Realtime.Integration.RtChannelTest do
500
end
- test "handle nil postgres changes params as empty param changes", %{tenant: tenant} do
- {socket, _} = get_connection(tenant)
+ test "handle nil postgres changes params as empty param changes", %{tenant: tenant, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer)
topic = "realtime:any"
config = %{postgres_changes: [nil]}
@@ -448,8 +401,8 @@ defmodule Realtime.Integration.RtChannelTest do
describe "handle broadcast extension" do
setup [:rls_context]
- test "public broadcast", %{tenant: tenant} do
- {socket, _} = get_connection(tenant)
+ test "public broadcast", %{tenant: tenant, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer)
config = %{broadcast: %{self: true}, private: false}
topic = "realtime:any"
WebsocketClient.join(socket, topic, %{config: config})
@@ -463,15 +416,17 @@ defmodule Realtime.Integration.RtChannelTest do
assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}, 500
end
- test "broadcast to another tenant does not get mixed up", %{tenant: tenant} do
- {socket, _} = get_connection(tenant)
+ test "broadcast to another tenant does not get mixed up", %{tenant: tenant, serializer: serializer} do
+ other_tenant = Containers.checkout_tenant(run_migrations: true)
+
+ Realtime.Tenants.Cache.update_cache(other_tenant)
+
+ {socket, _} = get_connection(tenant, serializer)
config = %{broadcast: %{self: false}, private: false}
topic = "realtime:any"
WebsocketClient.join(socket, topic, %{config: config})
- other_tenant = Containers.checkout_tenant(run_migrations: true)
-
- {other_socket, _} = get_connection(other_tenant)
+ {other_socket, _} = get_connection(other_tenant, serializer)
WebsocketClient.join(other_socket, topic, %{config: config})
# Both sockets joined
@@ -488,8 +443,12 @@ defmodule Realtime.Integration.RtChannelTest do
end
@tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
- test "private broadcast with valid channel with permissions sends message", %{tenant: tenant, topic: topic} do
- {socket, _} = get_connection(tenant, "authenticated")
+ test "private broadcast with valid channel with permissions sends message", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: true}
topic = "realtime:#{topic}"
WebsocketClient.join(socket, topic, %{config: config})
@@ -503,61 +462,12 @@ defmodule Realtime.Integration.RtChannelTest do
assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}
end
- @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence],
- mode: :distributed
- test "private broadcast with valid channel with permissions sends message using a remote node (phoenix adapter)", %{
- tenant: tenant,
- topic: topic
- } do
- {:ok, token} =
- generate_token(tenant, %{exp: System.system_time(:second) + 1000, role: "authenticated", sub: random_string()})
-
- {:ok, remote_socket} = WebsocketClient.connect(self(), uri(tenant, 4012), @serializer, [{"x-api-key", token}])
- {:ok, socket} = WebsocketClient.connect(self(), uri(tenant), @serializer, [{"x-api-key", token}])
-
- config = %{broadcast: %{self: false}, private: true}
- topic = "realtime:#{topic}"
-
- WebsocketClient.join(remote_socket, topic, %{config: config})
- WebsocketClient.join(socket, topic, %{config: config})
-
- # Send through one socket and receive through the other (self: false)
- payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
- WebsocketClient.send_event(socket, topic, "broadcast", payload)
-
- assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}, 500
- end
-
- @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence],
- mode: :distributed
- test "private broadcast with valid channel with permissions sends message using a remote node", %{
- tenant: tenant,
- topic: topic
- } do
- {:ok, token} =
- generate_token(tenant, %{exp: System.system_time(:second) + 1000, role: "authenticated", sub: random_string()})
-
- {:ok, remote_socket} = WebsocketClient.connect(self(), uri(tenant, 4012), @serializer, [{"x-api-key", token}])
- {:ok, socket} = WebsocketClient.connect(self(), uri(tenant), @serializer, [{"x-api-key", token}])
-
- config = %{broadcast: %{self: false}, private: true}
- topic = "realtime:#{topic}"
-
- WebsocketClient.join(remote_socket, topic, %{config: config})
- WebsocketClient.join(socket, topic, %{config: config})
-
- # Send through one socket and receive through the other (self: false)
- payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
- WebsocketClient.send_event(socket, topic, "broadcast", payload)
- assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}, 500
- end
-
@tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence],
topic: "topic"
test "private broadcast with valid channel a colon character sends message and won't intercept in public channels",
- %{topic: topic, tenant: tenant} do
- {anon_socket, _} = get_connection(tenant, "anon")
- {socket, _} = get_connection(tenant, "authenticated")
+ %{topic: topic, tenant: tenant, serializer: serializer} do
+ {anon_socket, _} = get_connection(tenant, serializer, role: "anon")
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
valid_topic = "realtime:#{topic}"
malicious_topic = "realtime:private:#{topic}"
@@ -579,17 +489,18 @@ defmodule Realtime.Integration.RtChannelTest do
@tag policies: [:authenticated_read_broadcast_and_presence]
test "private broadcast with valid channel no write permissions won't send message but will receive message", %{
tenant: tenant,
- topic: topic
+ topic: topic,
+ serializer: serializer
} do
config = %{broadcast: %{self: true}, private: true}
topic = "realtime:#{topic}"
- {service_role_socket, _} = get_connection(tenant, "service_role")
+ {service_role_socket, _} = get_connection(tenant, serializer, role: "service_role")
WebsocketClient.join(service_role_socket, topic, %{config: config})
assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
assert_receive %Message{event: "presence_state"}
- {socket, _} = get_connection(tenant, "authenticated")
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
WebsocketClient.join(socket, topic, %{config: config})
assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
assert_receive %Message{event: "presence_state"}
@@ -605,12 +516,16 @@ defmodule Realtime.Integration.RtChannelTest do
end
@tag policies: []
- test "private broadcast with valid channel and no read permissions won't join", %{tenant: tenant, topic: topic} do
+ test "private broadcast with valid channel and no read permissions won't join", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
config = %{private: true}
expected = "Unauthorized: You do not have permissions to read from this Channel topic: #{topic}"
topic = "realtime:#{topic}"
- {socket, _} = get_connection(tenant, "authenticated")
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
log =
capture_log(fn ->
@@ -636,14 +551,18 @@ defmodule Realtime.Integration.RtChannelTest do
end
@tag policies: [:authenticated_read_broadcast_and_presence]
- test "handles lack of connection to database error on private channels", %{tenant: tenant, topic: topic} do
+ test "handles lack of connection to database error on private channels", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
topic = "realtime:#{topic}"
- {socket, _} = get_connection(tenant, "authenticated")
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
WebsocketClient.join(socket, topic, %{config: %{broadcast: %{self: true}, private: true}})
assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
assert_receive %Message{event: "presence_state"}
- {service_role_socket, _} = get_connection(tenant, "service_role")
+ {service_role_socket, _} = get_connection(tenant, serializer, role: "service_role")
WebsocketClient.join(service_role_socket, topic, %{config: %{broadcast: %{self: false}, private: true}})
assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
assert_receive %Message{event: "presence_state"}
@@ -653,22 +572,26 @@ defmodule Realtime.Integration.RtChannelTest do
:syn.update_registry(Connect, tenant.external_id, fn _pid, meta -> %{meta | conn: nil} end)
payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
WebsocketClient.send_event(service_role_socket, topic, "broadcast", payload)
- # Waiting more than 5 seconds as this is the amount of time we will wait for the Connection to be ready
- refute_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}, 6000
+ # Waiting more than 15 seconds as this is the amount of time we will wait for the Connection to be ready
+ refute_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}, 16000
end)
assert log =~ "UnableToHandleBroadcast"
end
@tag policies: []
- test "lack of connection to database error does not impact public channels", %{tenant: tenant, topic: topic} do
+ test "lack of connection to database error does not impact public channels", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
topic = "realtime:#{topic}"
- {socket, _} = get_connection(tenant, "authenticated")
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
WebsocketClient.join(socket, topic, %{config: %{broadcast: %{self: true}, private: false}})
assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
assert_receive %Message{event: "presence_state"}
- {service_role_socket, _} = get_connection(tenant, "service_role")
+ {service_role_socket, _} = get_connection(tenant, serializer, role: "service_role")
WebsocketClient.join(service_role_socket, topic, %{config: %{broadcast: %{self: false}, private: false}})
assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
assert_receive %Message{event: "presence_state"}
@@ -688,8 +611,8 @@ defmodule Realtime.Integration.RtChannelTest do
describe "handle presence extension" do
setup [:rls_context]
- test "public presence", %{tenant: tenant} do
- {socket, _} = get_connection(tenant)
+ test "public presence", %{tenant: tenant, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer)
config = %{presence: %{key: "", enabled: true}, private: false}
topic = "realtime:any"
@@ -715,8 +638,8 @@ defmodule Realtime.Integration.RtChannelTest do
@tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
test "private presence with read and write permissions will be able to track and receive presence changes",
- %{tenant: tenant, topic: topic} do
- {socket, _} = get_connection(tenant, "authenticated")
+ %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
config = %{presence: %{key: "", enabled: true}, private: true}
topic = "realtime:#{topic}"
@@ -740,8 +663,8 @@ defmodule Realtime.Integration.RtChannelTest do
@tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence],
mode: :distributed
test "private presence with read and write permissions will be able to track and receive presence changes using a remote node",
- %{tenant: tenant, topic: topic} do
- {socket, _} = get_connection(tenant, "authenticated")
+ %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
config = %{presence: %{key: "", enabled: true}, private: true}
topic = "realtime:#{topic}"
@@ -764,9 +687,9 @@ defmodule Realtime.Integration.RtChannelTest do
@tag policies: [:authenticated_read_broadcast_and_presence]
test "private presence with read permissions will be able to receive presence changes but won't be able to track",
- %{tenant: tenant, topic: topic} do
- {socket, _} = get_connection(tenant, "authenticated")
- {secondary_socket, _} = get_connection(tenant, "service_role")
+ %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ {secondary_socket, _} = get_connection(tenant, serializer, role: "service_role")
config = fn key -> %{presence: %{key: key, enabled: true}, private: true} end
topic = "realtime:#{topic}"
@@ -816,9 +739,13 @@ defmodule Realtime.Integration.RtChannelTest do
end
@tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
- test "handles lack of connection to database error on private channels", %{tenant: tenant, topic: topic} do
+ test "handles lack of connection to database error on private channels", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
topic = "realtime:#{topic}"
- {socket, _} = get_connection(tenant, "authenticated")
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
WebsocketClient.join(socket, topic, %{config: %{private: true, presence: %{enabled: true}}})
assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
assert_receive %Message{event: "presence_state"}
@@ -831,16 +758,20 @@ defmodule Realtime.Integration.RtChannelTest do
refute_receive %Message{event: "presence_diff"}, 500
-      # Waiting more than 5 seconds as this is the amount of time we will wait for the Connection to be ready
+      # Waiting more than 15 seconds as this is the amount of time we will wait for the Connection to be ready
- refute_receive %Message{event: "phx_leave", topic: ^topic}, 6000
+ refute_receive %Message{event: "phx_leave", topic: ^topic}, 16000
end)
- assert log =~ "UnableToHandlePresence"
+ assert log =~ ~r/external_id=#{tenant.external_id}.*UnableToHandlePresence/
end
@tag policies: []
- test "lack of connection to database error does not impact public channels", %{tenant: tenant, topic: topic} do
+ test "lack of connection to database error does not impact public channels", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
topic = "realtime:#{topic}"
- {socket, _} = get_connection(tenant, "authenticated")
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
WebsocketClient.join(socket, topic, %{config: %{private: false, presence: %{enabled: true}}})
assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
assert_receive %Message{event: "presence_state"}
@@ -855,16 +786,17 @@ defmodule Realtime.Integration.RtChannelTest do
refute_receive %Message{event: "phx_leave", topic: ^topic}
end)
- refute log =~ "UnableToHandlePresence"
+ refute log =~ ~r/external_id=#{tenant.external_id}.*UnableToHandlePresence/
end
@tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
test "presence enabled if param enabled is set in configuration for private channels", %{
tenant: tenant,
- topic: topic
+ topic: topic,
+ serializer: serializer
} do
- {socket, _} = get_connection(tenant, "authenticated")
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
topic = "realtime:#{topic}"
WebsocketClient.join(socket, topic, %{config: %{private: true, presence: %{enabled: true}}})
@@ -876,9 +808,10 @@ defmodule Realtime.Integration.RtChannelTest do
test "presence disabled if param 'enabled' is set to false in configuration for private channels", %{
tenant: tenant,
- topic: topic
+ topic: topic,
+ serializer: serializer
} do
- {socket, _} = get_connection(tenant, "authenticated")
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
topic = "realtime:#{topic}"
WebsocketClient.join(socket, topic, %{config: %{private: true, presence: %{enabled: false}}})
@@ -888,9 +821,10 @@ defmodule Realtime.Integration.RtChannelTest do
test "presence enabled if param enabled is set in configuration for public channels", %{
tenant: tenant,
- topic: topic
+ topic: topic,
+ serializer: serializer
} do
- {socket, _} = get_connection(tenant, "authenticated")
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
topic = "realtime:#{topic}"
WebsocketClient.join(socket, topic, %{config: %{private: false, presence: %{enabled: true}}})
@@ -900,15 +834,69 @@ defmodule Realtime.Integration.RtChannelTest do
test "presence disabled if param 'enabled' is set to false in configuration for public channels", %{
tenant: tenant,
- topic: topic
+ topic: topic,
+ serializer: serializer
} do
- {socket, _} = get_connection(tenant, "authenticated")
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
topic = "realtime:#{topic}"
WebsocketClient.join(socket, topic, %{config: %{private: false, presence: %{enabled: false}}})
assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
refute_receive %Message{event: "presence_state"}, 500
end
+
+ test "presence automatically enabled when user sends track message for public channel", %{
+ tenant: tenant,
+ serializer: serializer
+ } do
+ {socket, _} = get_connection(tenant, serializer)
+ config = %{presence: %{key: "", enabled: false}, private: false}
+ topic = "realtime:any"
+
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ refute_receive %Message{event: "presence_state"}, 500
+
+ payload = %{
+ type: "presence",
+ event: "TRACK",
+ payload: %{name: "realtime_presence_96", t: 1814.7000000029802}
+ }
+
+ WebsocketClient.send_event(socket, topic, "presence", payload)
+
+ assert_receive %Message{event: "presence_diff", payload: %{"joins" => joins, "leaves" => %{}}, topic: ^topic}
+
+ join_payload = joins |> Map.values() |> hd() |> get_in(["metas"]) |> hd()
+ assert get_in(join_payload, ["name"]) == payload.payload.name
+ assert get_in(join_payload, ["t"]) == payload.payload.t
+ end
+
+ @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
+ test "presence automatically enabled when user sends track message for private channel",
+ %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{presence: %{key: "", enabled: false}, private: true}
+ topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, topic, %{config: config})
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ refute_receive %Message{event: "presence_state"}, 500
+
+ payload = %{
+ type: "presence",
+ event: "TRACK",
+ payload: %{name: "realtime_presence_96", t: 1814.7000000029802}
+ }
+
+ WebsocketClient.send_event(socket, topic, "presence", payload)
+
+ assert_receive %Message{event: "presence_diff", payload: %{"joins" => joins, "leaves" => %{}}, topic: ^topic}, 500
+ join_payload = joins |> Map.values() |> hd() |> get_in(["metas"]) |> hd()
+ assert get_in(join_payload, ["name"]) == payload.payload.name
+ assert get_in(join_payload, ["t"]) == payload.payload.t
+ end
end
describe "token handling" do
@@ -918,32 +906,40 @@ defmodule Realtime.Integration.RtChannelTest do
:authenticated_read_broadcast_and_presence,
:authenticated_write_broadcast_and_presence
]
- test "badly formatted jwt token", %{tenant: tenant} do
+ test "badly formatted jwt token", %{tenant: tenant, serializer: serializer} do
log =
capture_log(fn ->
- WebsocketClient.connect(self(), uri(tenant), @serializer, [{"x-api-key", "bad_token"}])
+ WebsocketClient.connect(self(), uri(tenant, serializer), serializer, [{"x-api-key", "bad_token"}])
end)
assert log =~ "MalformedJWT: The token provided is not a valid JWT"
end
- test "invalid JWT with expired token", %{tenant: tenant} do
+ test "invalid JWT with expired token", %{tenant: tenant, serializer: serializer} do
log =
capture_log(fn ->
- get_connection(tenant, "authenticated", %{:exp => System.system_time(:second) - 1000}, %{log_level: :info})
+ get_connection(tenant, serializer,
+ role: "authenticated",
+ claims: %{:exp => System.system_time(:second) - 1000},
+ params: %{log_level: :info}
+ )
end)
assert log =~ "InvalidJWTToken: Token has expired"
end
- test "token required the role key", %{tenant: tenant} do
+ test "token required the role key", %{tenant: tenant, serializer: serializer} do
{:ok, token} = token_no_role(tenant)
assert {:error, %{status_code: 403}} =
- WebsocketClient.connect(self(), uri(tenant), @serializer, [{"x-api-key", token}])
+ WebsocketClient.connect(self(), uri(tenant, serializer), serializer, [{"x-api-key", token}])
end
- test "handles connection with valid api-header but ignorable access_token payload", %{tenant: tenant, topic: topic} do
+ test "handles connection with valid api-header but ignorable access_token payload", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
realtime_topic = "realtime:#{topic}"
log =
@@ -955,7 +951,7 @@ defmodule Realtime.Integration.RtChannelTest do
sub: random_string()
})
- {:ok, socket} = WebsocketClient.connect(self(), uri(tenant), @serializer, [{"x-api-key", token}])
+ {:ok, socket} = WebsocketClient.connect(self(), uri(tenant, serializer), serializer, [{"x-api-key", token}])
WebsocketClient.join(socket, realtime_topic, %{
config: %{broadcast: %{self: true}, private: false},
@@ -971,8 +967,8 @@ defmodule Realtime.Integration.RtChannelTest do
@tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
test "on new access_token and channel is private policies are reevaluated for read policy",
- %{tenant: tenant, topic: topic} do
- {socket, access_token} = get_connection(tenant, "authenticated")
+ %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, access_token} = get_connection(tenant, serializer, role: "authenticated")
realtime_topic = "realtime:#{topic}"
@@ -1002,9 +998,10 @@ defmodule Realtime.Integration.RtChannelTest do
@tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
test "on new access_token and channel is private policies are reevaluated for write policy", %{
topic: topic,
- tenant: tenant
+ tenant: tenant,
+ serializer: serializer
} do
- {socket, access_token} = get_connection(tenant, "authenticated")
+ {socket, access_token} = get_connection(tenant, serializer, role: "authenticated")
realtime_topic = "realtime:#{topic}"
config = %{broadcast: %{self: true}, private: true}
WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
@@ -1041,8 +1038,12 @@ defmodule Realtime.Integration.RtChannelTest do
1500
end
- test "on new access_token and channel is public policies are not reevaluated", %{tenant: tenant, topic: topic} do
- {socket, access_token} = get_connection(tenant, "authenticated")
+ test "on new access_token and channel is public policies are not reevaluated", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ {socket, access_token} = get_connection(tenant, serializer, role: "authenticated")
{:ok, new_token} = token_valid(tenant, "anon")
config = %{broadcast: %{self: true}, private: false}
realtime_topic = "realtime:#{topic}"
@@ -1057,8 +1058,12 @@ defmodule Realtime.Integration.RtChannelTest do
refute_receive %Message{}
end
- test "on empty string access_token the socket sends an error message", %{tenant: tenant, topic: topic} do
- {socket, access_token} = get_connection(tenant, "authenticated")
+ test "on empty string access_token the socket sends an error message", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ {socket, access_token} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: false}
realtime_topic = "realtime:#{topic}"
@@ -1083,10 +1088,14 @@ defmodule Realtime.Integration.RtChannelTest do
assert msg =~ "The token provided is not a valid JWT"
end
- test "on expired access_token the socket sends an error message", %{tenant: tenant, topic: topic} do
+ test "on expired access_token the socket sends an error message", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
sub = random_string()
- {socket, access_token} = get_connection(tenant, "authenticated", %{sub: sub})
+ {socket, access_token} = get_connection(tenant, serializer, role: "authenticated", claims: %{sub: sub})
config = %{broadcast: %{self: true}, private: false}
realtime_topic = "realtime:#{topic}"
@@ -1098,23 +1107,31 @@ defmodule Realtime.Integration.RtChannelTest do
{:ok, token} = generate_token(tenant, %{:exp => System.system_time(:second) - 1000, sub: sub})
log =
- capture_log([log_level: :warning], fn ->
+ capture_log(fn ->
WebsocketClient.send_event(socket, realtime_topic, "access_token", %{"access_token" => token})
assert_receive %Message{
topic: ^realtime_topic,
event: "system",
- payload: %{"extension" => "system", "message" => "Token has expired 1000 seconds ago", "status" => "error"}
+ payload: %{"extension" => "system", "message" => "Token has expired " <> _, "status" => "error"}
}
+
+ assert_receive %Message{event: "phx_close", topic: ^realtime_topic}
end)
- assert log =~ "ChannelShutdown: Token has expired 1000 seconds ago"
+ assert log =~ "ChannelShutdown: Token has expired"
end
- test "ChannelShutdown include sub if available in jwt claims", %{tenant: tenant, topic: topic} do
+ test "ChannelShutdown include sub if available in jwt claims", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
exp = System.system_time(:second) + 10_000
- {socket, access_token} = get_connection(tenant, "authenticated", %{exp: exp}, %{log_level: :warning})
+ {socket, access_token} =
+ get_connection(tenant, serializer, role: "authenticated", claims: %{exp: exp}, params: %{log_level: :warning})
+
config = %{broadcast: %{self: true}, private: false}
realtime_topic = "realtime:#{topic}"
sub = random_string()
@@ -1126,14 +1143,15 @@ defmodule Realtime.Integration.RtChannelTest do
WebsocketClient.send_event(socket, realtime_topic, "access_token", %{"access_token" => token})
assert_receive %Message{event: "system"}, 1000
+ assert_receive %Message{event: "phx_close", topic: ^realtime_topic}
end)
assert log =~ "ChannelShutdown"
assert log =~ "sub=#{sub}"
end
- test "missing claims close connection", %{tenant: tenant, topic: topic} do
- {socket, access_token} = get_connection(tenant, "authenticated")
+ test "missing claims close connection", %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, access_token} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: false}
realtime_topic = "realtime:#{topic}"
@@ -1160,8 +1178,8 @@ defmodule Realtime.Integration.RtChannelTest do
assert_receive %Message{event: "phx_close"}
end
- test "checks token periodically", %{tenant: tenant, topic: topic} do
- {socket, access_token} = get_connection(tenant, "authenticated")
+ test "checks token periodically", %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, access_token} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: false}
realtime_topic = "realtime:#{topic}"
@@ -1171,7 +1189,8 @@ defmodule Realtime.Integration.RtChannelTest do
assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
assert_receive %Message{event: "presence_state"}, 500
- {:ok, token} = generate_token(tenant, %{:exp => System.system_time(:second) + 2, role: "authenticated"})
+ {:ok, token} =
+ generate_token(tenant, %{:exp => System.system_time(:second) + 2, role: "authenticated"})
# Update token to be a near expiring token
WebsocketClient.send_event(socket, realtime_topic, "access_token", %{"access_token" => token})
@@ -1188,8 +1207,8 @@ defmodule Realtime.Integration.RtChannelTest do
assert msg =~ "Token has expired"
end
- test "token expires in between joins", %{tenant: tenant, topic: topic} do
- {socket, access_token} = get_connection(tenant, "authenticated")
+ test "token expires in between joins", %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, access_token} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: false}
realtime_topic = "realtime:#{topic}"
@@ -1198,7 +1217,8 @@ defmodule Realtime.Integration.RtChannelTest do
assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
assert_receive %Message{event: "presence_state"}, 500
- {:ok, access_token} = generate_token(tenant, %{:exp => System.system_time(:second) + 1, role: "authenticated"})
+ {:ok, access_token} =
+ generate_token(tenant, %{:exp => System.system_time(:second) + 1, role: "authenticated"})
# token expires in between joins so it needs to be handled by the channel and not the socket
Process.sleep(1000)
@@ -1223,8 +1243,8 @@ defmodule Realtime.Integration.RtChannelTest do
assert log =~ "#{tenant.external_id}"
end
- test "token loses claims in between joins", %{tenant: tenant, topic: topic} do
- {socket, access_token} = get_connection(tenant, "authenticated")
+ test "token loses claims in between joins", %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, access_token} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: false}
realtime_topic = "realtime:#{topic}"
@@ -1254,8 +1274,8 @@ defmodule Realtime.Integration.RtChannelTest do
assert_receive %Message{event: "phx_close"}
end
- test "token is badly formatted in between joins", %{tenant: tenant, topic: topic} do
- {socket, access_token} = get_connection(tenant, "authenticated")
+ test "token is badly formatted in between joins", %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, access_token} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: false}
realtime_topic = "realtime:#{topic}"
@@ -1282,56 +1302,18 @@ defmodule Realtime.Integration.RtChannelTest do
assert_receive %Message{event: "phx_close"}
end
- @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
- test "handles RPC error on token refreshed", %{tenant: tenant, topic: topic} do
- Authorization
- |> expect(:get_read_authorizations, fn conn, db_conn, context ->
- call_original(Authorization, :get_read_authorizations, [conn, db_conn, context])
- end)
- |> expect(:get_read_authorizations, fn _, _, _ -> {:error, "RPC Error"} end)
-
- {socket, access_token} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: true}
- realtime_topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
-
- assert_receive %Phoenix.Socket.Message{event: "phx_reply"}, 500
- assert_receive %Phoenix.Socket.Message{event: "presence_state"}, 500
-
- # Update token to force update
- {:ok, access_token} =
- generate_token(tenant, %{:exp => System.system_time(:second) + 1000, role: "authenticated"})
-
- log =
- capture_log([log_level: :warning], fn ->
- WebsocketClient.send_event(socket, realtime_topic, "access_token", %{"access_token" => access_token})
-
- assert_receive %Phoenix.Socket.Message{
- event: "system",
- payload: %{
- "status" => "error",
- "extension" => "system",
- "message" => "Realtime was unable to connect to the project database"
- },
- topic: ^realtime_topic
- },
- 500
-
- assert_receive %Phoenix.Socket.Message{event: "phx_close", topic: ^realtime_topic}
- end)
-
- assert log =~ "Realtime was unable to connect to the project database"
- end
-
test "on sb prefixed access_token the socket ignores the message and respects JWT expiry time", %{
tenant: tenant,
- topic: topic
+ topic: topic,
+ serializer: serializer
} do
sub = random_string()
{socket, access_token} =
- get_connection(tenant, "authenticated", %{sub: sub, exp: System.system_time(:second) + 5})
+ get_connection(tenant, serializer,
+ role: "authenticated",
+ claims: %{sub: sub, exp: System.system_time(:second) + 5}
+ )
config = %{broadcast: %{self: true}, private: false}
realtime_topic = "realtime:#{topic}"
@@ -1369,9 +1351,10 @@ defmodule Realtime.Integration.RtChannelTest do
tenant: tenant,
topic: topic,
db_conn: db_conn,
- table_name: table_name
+ table_name: table_name,
+ serializer: serializer
} do
- {socket, _} = get_connection(tenant, "authenticated")
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: true}
topic = "realtime:#{topic}"
@@ -1409,10 +1392,11 @@ defmodule Realtime.Integration.RtChannelTest do
tenant: tenant,
topic: topic,
db_conn: db_conn,
- table_name: table_name
+ table_name: table_name,
+ serializer: serializer
} do
value = random_string()
- {socket, _} = get_connection(tenant, "authenticated")
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: true}
topic = "realtime:#{topic}"
@@ -1452,9 +1436,10 @@ defmodule Realtime.Integration.RtChannelTest do
tenant: tenant,
topic: topic,
db_conn: db_conn,
- table_name: table_name
+ table_name: table_name,
+ serializer: serializer
} do
- {socket, _} = get_connection(tenant, "authenticated")
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: true}
topic = "realtime:#{topic}"
@@ -1492,9 +1477,10 @@ defmodule Realtime.Integration.RtChannelTest do
test "broadcast event when function 'send' is called with private topic", %{
tenant: tenant,
topic: topic,
- db_conn: db_conn
+ db_conn: db_conn,
+ serializer: serializer
} do
- {socket, _} = get_connection(tenant, "authenticated")
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: true}
full_topic = "realtime:#{topic}"
@@ -1529,9 +1515,10 @@ defmodule Realtime.Integration.RtChannelTest do
test "broadcast event when function 'send' is called with public topic", %{
tenant: tenant,
topic: topic,
- db_conn: db_conn
+ db_conn: db_conn,
+ serializer: serializer
} do
- {socket, _} = get_connection(tenant, "authenticated")
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: false}
full_topic = "realtime:#{topic}"
@@ -1568,11 +1555,11 @@ defmodule Realtime.Integration.RtChannelTest do
@tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
test "user with only private channels enabled will not be able to join public channels", %{
tenant: tenant,
- topic: topic
+ topic: topic,
+ serializer: serializer
} do
change_tenant_configuration(tenant, :private_only, true)
- on_exit(fn -> change_tenant_configuration(tenant, :private_only, false) end)
- {socket, _} = get_connection(tenant, "authenticated")
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: false}
topic = "realtime:#{topic}"
@@ -1593,14 +1580,14 @@ defmodule Realtime.Integration.RtChannelTest do
@tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
test "user with only private channels enabled will be able to join private channels", %{
tenant: tenant,
- topic: topic
+ topic: topic,
+ serializer: serializer
} do
change_tenant_configuration(tenant, :private_only, true)
- on_exit(fn -> change_tenant_configuration(tenant, :private_only, false) end)
Process.sleep(100)
- {socket, _} = get_connection(tenant, "authenticated")
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: true}
topic = "realtime:#{topic}"
WebsocketClient.join(socket, topic, %{config: config})
@@ -1612,21 +1599,19 @@ defmodule Realtime.Integration.RtChannelTest do
describe "socket disconnect" do
setup [:rls_context]
- test "tenant already suspended", %{topic: _topic} do
- tenant = Containers.checkout_tenant(run_migrations: true)
-
+ test "tenant already suspended", %{tenant: tenant, serializer: serializer} do
log =
capture_log(fn ->
- {:ok, _} = Realtime.Api.update_tenant(tenant, %{suspend: true})
- {:error, %Mint.WebSocket.UpgradeFailureError{}} = get_connection(tenant, "anon")
+ change_tenant_configuration(tenant, :suspend, true)
+ {:error, %Mint.WebSocket.UpgradeFailureError{}} = get_connection(tenant, serializer, role: "anon")
refute_receive _any
end)
assert log =~ "RealtimeDisabledForTenant"
end
- test "on jwks the socket closes and sends a system message", %{tenant: tenant, topic: topic} do
- {socket, _} = get_connection(tenant, "authenticated")
+ test "on jwks the socket closes and sends a system message", %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: false}
realtime_topic = "realtime:#{topic}"
@@ -1634,14 +1619,17 @@ defmodule Realtime.Integration.RtChannelTest do
assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
assert_receive %Message{event: "presence_state"}, 500
- tenant = Tenants.get_tenant_by_external_id(tenant.external_id)
- Realtime.Api.update_tenant(tenant, %{jwt_jwks: %{keys: ["potato"]}})
+ Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{jwt_jwks: %{keys: ["potato"]}})
assert_process_down(socket)
end
- test "on jwt_secret the socket closes and sends a system message", %{tenant: tenant, topic: topic} do
- {socket, _} = get_connection(tenant, "authenticated")
+ test "on jwt_secret the socket closes and sends a system message", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: false}
realtime_topic = "realtime:#{topic}"
@@ -1650,14 +1638,16 @@ defmodule Realtime.Integration.RtChannelTest do
assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
assert_receive %Message{event: "presence_state"}, 500
- tenant = Tenants.get_tenant_by_external_id(tenant.external_id)
- Realtime.Api.update_tenant(tenant, %{jwt_secret: "potato"})
-
+ Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{jwt_secret: "potato"})
assert_process_down(socket)
end
- test "on private_only the socket closes and sends a system message", %{tenant: tenant, topic: topic} do
- {socket, _} = get_connection(tenant, "authenticated")
+ test "on private_only the socket closes and sends a system message", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: false}
realtime_topic = "realtime:#{topic}"
@@ -1666,14 +1656,16 @@ defmodule Realtime.Integration.RtChannelTest do
assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
assert_receive %Message{event: "presence_state"}, 500
- tenant = Tenants.get_tenant_by_external_id(tenant.external_id)
- Realtime.Api.update_tenant(tenant, %{private_only: true})
-
+ Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{private_only: true})
assert_process_down(socket)
end
- test "on other param changes the socket won't close and no message is sent", %{tenant: tenant, topic: topic} do
- {socket, _} = get_connection(tenant, "authenticated")
+ test "on other param changes the socket won't close and no message is sent", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: false}
realtime_topic = "realtime:#{topic}"
@@ -1682,8 +1674,7 @@ defmodule Realtime.Integration.RtChannelTest do
assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
assert_receive %Message{event: "presence_state"}, 500
- tenant = Tenants.get_tenant_by_external_id(tenant.external_id)
- Realtime.Api.update_tenant(tenant, %{max_concurrent_users: 100})
+ Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{max_concurrent_users: 100})
refute_receive %Message{
topic: ^realtime_topic,
@@ -1700,17 +1691,24 @@ defmodule Realtime.Integration.RtChannelTest do
assert :ok = WebsocketClient.send_heartbeat(socket)
end
- test "invalid JWT with expired token", %{tenant: tenant} do
+ test "invalid JWT with expired token", %{tenant: tenant, serializer: serializer} do
log =
capture_log(fn ->
- get_connection(tenant, "authenticated", %{:exp => System.system_time(:second) - 1000}, %{log_level: :info})
+ get_connection(tenant, serializer,
+ role: "authenticated",
+ claims: %{:exp => System.system_time(:second) - 1000},
+ params: %{log_level: :info}
+ )
end)
assert log =~ "InvalidJWTToken: Token has expired"
end
- test "check registry of SocketDisconnect and on distribution called, kill socket", %{tenant: tenant} do
- {socket, _} = get_connection(tenant, "authenticated")
+ test "check registry of SocketDisconnect and on distribution called, kill socket", %{
+ tenant: tenant,
+ serializer: serializer
+ } do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: false}
for _ <- 1..10 do
@@ -1732,11 +1730,11 @@ defmodule Realtime.Integration.RtChannelTest do
describe "rate limits" do
setup [:rls_context]
- test "max_concurrent_users limit respected", %{tenant: tenant} do
+ test "max_concurrent_users limit respected", %{tenant: tenant, serializer: serializer} do
%{max_concurrent_users: max_concurrent_users} = Tenants.get_tenant_by_external_id(tenant.external_id)
change_tenant_configuration(tenant, :max_concurrent_users, 1)
- {socket, _} = get_connection(tenant, "authenticated")
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: false}
realtime_topic = "realtime:#{random_string()}"
WebsocketClient.join(socket, realtime_topic, %{config: config})
@@ -1758,14 +1756,12 @@ defmodule Realtime.Integration.RtChannelTest do
change_tenant_configuration(tenant, :max_concurrent_users, max_concurrent_users)
end
- test "max_events_per_second limit respected", %{tenant: tenant} do
- %{max_events_per_second: max_events_per_second} = Tenants.get_tenant_by_external_id(tenant.external_id)
- on_exit(fn -> change_tenant_configuration(tenant, :max_events_per_second, max_events_per_second) end)
- RateCounter.stop(tenant.external_id)
+ test "max_events_per_second limit respected", %{tenant: tenant, serializer: serializer} do
+ RateCounterHelper.stop(tenant.external_id)
log =
capture_log(fn ->
- {socket, _} = get_connection(tenant, "authenticated")
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: false, presence: %{enabled: false}}
realtime_topic = "realtime:#{random_string()}"
@@ -1785,11 +1781,10 @@ defmodule Realtime.Integration.RtChannelTest do
assert log =~ "MessagePerSecondRateLimitReached"
end
- test "max_channels_per_client limit respected", %{tenant: tenant} do
- %{max_events_per_second: max_concurrent_users} = Tenants.get_tenant_by_external_id(tenant.external_id)
+ test "max_channels_per_client limit respected", %{tenant: tenant, serializer: serializer} do
change_tenant_configuration(tenant, :max_channels_per_client, 1)
- {socket, _} = get_connection(tenant, "authenticated")
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: false}
realtime_topic_1 = "realtime:#{random_string()}"
realtime_topic_2 = "realtime:#{random_string()}"
@@ -1820,12 +1815,10 @@ defmodule Realtime.Integration.RtChannelTest do
refute_receive %Message{event: "phx_reply", topic: ^realtime_topic_2}, 500
refute_receive %Message{event: "presence_state", topic: ^realtime_topic_2}, 500
-
- change_tenant_configuration(tenant, :max_channels_per_client, max_concurrent_users)
end
- test "max_joins_per_second limit respected", %{tenant: tenant} do
- {socket, _} = get_connection(tenant, "authenticated")
+ test "max_joins_per_second limit respected", %{tenant: tenant, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: false}
realtime_topic = "realtime:#{random_string()}"
@@ -1838,6 +1831,7 @@ defmodule Realtime.Integration.RtChannelTest do
# Wait for RateCounter tick
Process.sleep(1000)
+
# These ones will be blocked
for _ <- 1..300 do
WebsocketClient.join(socket, realtime_topic, %{config: config})
@@ -1858,9 +1852,8 @@ defmodule Realtime.Integration.RtChannelTest do
assert log =~
"project=#{tenant.external_id} external_id=#{tenant.external_id} [critical] ClientJoinRateLimitReached: Too many joins per second"
- # Only one log message should be emitted
- # Splitting by the error message returns the error message and the rest of the log only
- assert length(String.split(log, "ClientJoinRateLimitReached")) == 2
+ # Only one or two log messages should be emitted
+ assert length(String.split(log, "ClientJoinRateLimitReached")) <= 3
end
end
@@ -1868,8 +1861,8 @@ defmodule Realtime.Integration.RtChannelTest do
setup [:rls_context]
@tag policies: [:read_matching_user_role, :write_matching_user_role], role: "anon"
- test "role policies are respected when accessing the channel", %{tenant: tenant} do
- {socket, _} = get_connection(tenant, "anon")
+ test "role policies are respected when accessing the channel", %{tenant: tenant, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer, role: "anon")
config = %{broadcast: %{self: true}, private: true, presence: %{enabled: false}}
topic = random_string()
realtime_topic = "realtime:#{topic}"
@@ -1878,7 +1871,7 @@ defmodule Realtime.Integration.RtChannelTest do
assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^realtime_topic}, 500
- {socket, _} = get_connection(tenant, "potato")
+ {socket, _} = get_connection(tenant, serializer, role: "potato")
topic = random_string()
realtime_topic = "realtime:#{topic}"
@@ -1888,8 +1881,8 @@ defmodule Realtime.Integration.RtChannelTest do
@tag policies: [:authenticated_read_matching_user_sub, :authenticated_write_matching_user_sub],
sub: Ecto.UUID.generate()
- test "sub policies are respected when accessing the channel", %{tenant: tenant, sub: sub} do
- {socket, _} = get_connection(tenant, "authenticated", %{sub: sub})
+ test "sub policies are respected when accessing the channel", %{tenant: tenant, sub: sub, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated", claims: %{sub: sub})
config = %{broadcast: %{self: true}, private: true, presence: %{enabled: false}}
topic = random_string()
realtime_topic = "realtime:#{topic}"
@@ -1898,7 +1891,7 @@ defmodule Realtime.Integration.RtChannelTest do
assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^realtime_topic}, 500
- {socket, _} = get_connection(tenant, "authenticated", %{sub: Ecto.UUID.generate()})
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated", claims: %{sub: Ecto.UUID.generate()})
topic = random_string()
realtime_topic = "realtime:#{topic}"
@@ -1906,11 +1899,9 @@ defmodule Realtime.Integration.RtChannelTest do
refute_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^realtime_topic}, 500
end
- @tag role: "authenticated",
- policies: [:broken_read_presence, :broken_write_presence]
-
- test "handle failing rls policy", %{tenant: tenant} do
- {socket, _} = get_connection(tenant, "authenticated")
+ @tag role: "authenticated", policies: [:broken_read_presence, :broken_write_presence]
+ test "handle failing rls policy", %{tenant: tenant, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: true}
topic = random_string()
realtime_topic = "realtime:#{topic}"
@@ -1940,8 +1931,8 @@ defmodule Realtime.Integration.RtChannelTest do
end
end
- test "handle empty topic by closing the socket", %{tenant: tenant} do
- {socket, _} = get_connection(tenant, "authenticated")
+ test "handle empty topic by closing the socket", %{tenant: tenant, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
config = %{broadcast: %{self: true}, private: false}
realtime_topic = "realtime:"
@@ -1962,20 +1953,35 @@ defmodule Realtime.Integration.RtChannelTest do
refute_receive %Message{event: "presence_state"}
end
- def handle_telemetry(event, %{sum: sum}, metadata, _) do
+ def handle_telemetry(event, measurements, metadata, name) do
tenant = metadata[:tenant]
[key] = Enum.take(event, -1)
+ value = Map.get(measurements, :sum) || Map.get(measurements, :value) || Map.get(measurements, :size) || 0
+
+ Agent.update(name, fn state ->
+ state =
+ Map.put_new(
+ state,
+ tenant,
+ %{
+ joins: 0,
+ events: 0,
+ db_events: 0,
+ presence_events: 0,
+ output_bytes: 0,
+ input_bytes: 0
+ }
+ )
- Agent.update(TestCounter, fn state ->
- state = Map.put_new(state, tenant, %{joins: 0, events: 0, db_events: 0, presence_events: 0})
- update_in(state, [metadata[:tenant], key], fn v -> (v || 0) + sum end)
+ update_in(state, [metadata[:tenant], key], fn v -> (v || 0) + value end)
end)
end
defp get_count(event, tenant) do
[key] = Enum.take(event, -1)
- Agent.get(TestCounter, fn state -> get_in(state, [tenant, key]) || 0 end)
+ :"TestCounter_#{tenant}"
+ |> Agent.get(fn state -> get_in(state, [tenant, key]) || 0 end)
end
describe "billable events" do
@@ -1987,45 +1993,24 @@ defmodule Realtime.Integration.RtChannelTest do
[:realtime, :rate_counter, :channel, :presence_events]
]
+ name = :"TestCounter_#{tenant.external_id}"
+
{:ok, _} =
start_supervised(%{
id: 1,
- start: {Agent, :start_link, [fn -> %{} end, [name: TestCounter]]}
+ start: {Agent, :start_link, [fn -> %{} end, [name: name]]}
})
- RateCounter.stop(tenant.external_id)
- on_exit(fn -> :telemetry.detach(__MODULE__) end)
- :telemetry.attach_many(__MODULE__, events, &__MODULE__.handle_telemetry/4, [])
-
- {:ok, conn} = Database.connect(tenant, "realtime_test")
-
- # Setup for postgres changes
- Database.transaction(conn, fn db_conn ->
- queries = [
- "drop table if exists public.test",
- "drop publication if exists supabase_realtime_test",
- "create sequence if not exists test_id_seq;",
- """
- create table if not exists "public"."test" (
- "id" int4 not null default nextval('test_id_seq'::regclass),
- "details" text,
- primary key ("id"));
- """,
- "grant all on table public.test to anon;",
- "grant all on table public.test to postgres;",
- "grant all on table public.test to authenticated;",
- "create publication supabase_realtime_test for all tables"
- ]
-
- Enum.each(queries, &Postgrex.query!(db_conn, &1, []))
- end)
+ RateCounterHelper.stop(tenant.external_id)
+ on_exit(fn -> :telemetry.detach({__MODULE__, tenant.external_id}) end)
+ :telemetry.attach_many({__MODULE__, tenant.external_id}, events, &__MODULE__.handle_telemetry/4, name)
:ok
end
- test "join events", %{tenant: tenant} do
+ test "join events", %{tenant: tenant, serializer: serializer} do
external_id = tenant.external_id
- {socket, _} = get_connection(tenant)
+ {socket, _} = get_connection(tenant, serializer)
config = %{broadcast: %{self: true}, postgres_changes: [%{event: "*", schema: "public"}]}
topic = "realtime:any"
@@ -2037,7 +2022,7 @@ defmodule Realtime.Integration.RtChannelTest do
assert_receive %Message{topic: ^topic, event: "system"}, 5000
# Wait for RateCounter to run
- Process.sleep(2000)
+ RateCounterHelper.tick_tenant_rate_counters!(tenant.external_id)
# Expected billed
# 1 joins due to two sockets
@@ -2050,21 +2035,21 @@ defmodule Realtime.Integration.RtChannelTest do
assert 0 = get_count([:realtime, :rate_counter, :channel, :events], external_id)
end
- test "broadcast events", %{tenant: tenant} do
+ test "broadcast events", %{tenant: tenant, serializer: serializer} do
external_id = tenant.external_id
- {socket, _} = get_connection(tenant)
+ {socket1, _} = get_connection(tenant, serializer)
config = %{broadcast: %{self: true}}
topic = "realtime:any"
- WebsocketClient.join(socket, topic, %{config: config})
+ WebsocketClient.join(socket1, topic, %{config: config})
# Join events
assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
assert_receive %Message{topic: ^topic, event: "presence_state"}
# Add second client so we can test the "multiplication" of billable events
- {socket, _} = get_connection(tenant)
- WebsocketClient.join(socket, topic, %{config: config})
+ {socket2, _} = get_connection(tenant, serializer)
+ WebsocketClient.join(socket2, topic, %{config: config})
# Join events
assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
@@ -2074,12 +2059,16 @@ defmodule Realtime.Integration.RtChannelTest do
payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
for _ <- 1..5 do
- WebsocketClient.send_event(socket, topic, "broadcast", payload)
+ WebsocketClient.send_event(socket1, topic, "broadcast", payload)
+ # both sockets
+ assert_receive %Message{topic: ^topic, event: "broadcast", payload: ^payload}
assert_receive %Message{topic: ^topic, event: "broadcast", payload: ^payload}
end
+ refute_receive _any
+
# Wait for RateCounter to run
- Process.sleep(2000)
+ RateCounterHelper.tick_tenant_rate_counters!(tenant.external_id)
# Expected billed
# 2 joins due to two sockets
@@ -2092,9 +2081,9 @@ defmodule Realtime.Integration.RtChannelTest do
assert 15 = get_count([:realtime, :rate_counter, :channel, :events], external_id)
end
- test "presence events", %{tenant: tenant} do
+ test "presence events", %{tenant: tenant, serializer: serializer} do
external_id = tenant.external_id
- {socket, _} = get_connection(tenant)
+ {socket, _} = get_connection(tenant, serializer)
config = %{broadcast: %{self: true}, presence: %{enabled: true}}
topic = "realtime:any"
@@ -2114,7 +2103,7 @@ defmodule Realtime.Integration.RtChannelTest do
assert_receive %Message{event: "presence_diff", payload: %{"joins" => _, "leaves" => %{}}, topic: ^topic}
# Presence events
- {socket, _} = get_connection(tenant, "authenticated")
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
WebsocketClient.join(socket, topic, %{config: config})
assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
@@ -2131,7 +2120,7 @@ defmodule Realtime.Integration.RtChannelTest do
assert_receive %Message{event: "presence_diff", payload: %{"joins" => _, "leaves" => %{}}, topic: ^topic}
# Wait for RateCounter to run
- Process.sleep(2000)
+ RateCounterHelper.tick_tenant_rate_counters!(tenant.external_id)
# Expected billed
# 2 joins due to two sockets
@@ -2144,9 +2133,9 @@ defmodule Realtime.Integration.RtChannelTest do
assert 0 = get_count([:realtime, :rate_counter, :channel, :events], external_id)
end
- test "postgres changes events", %{tenant: tenant} do
+ test "postgres changes events", %{tenant: tenant, serializer: serializer} do
external_id = tenant.external_id
- {socket, _} = get_connection(tenant)
+ {socket, _} = get_connection(tenant, serializer)
config = %{broadcast: %{self: true}, postgres_changes: [%{event: "*", schema: "public"}]}
topic = "realtime:any"
@@ -2158,7 +2147,7 @@ defmodule Realtime.Integration.RtChannelTest do
assert_receive %Message{topic: ^topic, event: "system"}, 5000
# Add second user to test the "multiplication" of billable events
- {socket, _} = get_connection(tenant)
+ {socket, _} = get_connection(tenant, serializer)
WebsocketClient.join(socket, topic, %{config: config})
assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
assert_receive %Message{topic: ^topic, event: "presence_state"}, 500
@@ -2170,7 +2159,7 @@ defmodule Realtime.Integration.RtChannelTest do
# Postgres Change events
for _ <- 1..5, do: Postgrex.query!(conn, "insert into test (details) values ('test')", [])
- for _ <- 1..5 do
+ for _ <- 1..10 do
assert_receive %Message{
topic: ^topic,
event: "postgres_changes",
@@ -2180,7 +2169,7 @@ defmodule Realtime.Integration.RtChannelTest do
end
# Wait for RateCounter to run
- Process.sleep(2000)
+ RateCounterHelper.tick_tenant_rate_counters!(tenant.external_id)
# Expected billed
# 2 joins due to two sockets
@@ -2189,13 +2178,14 @@ defmodule Realtime.Integration.RtChannelTest do
# 0 events as no broadcast used
assert 2 = get_count([:realtime, :rate_counter, :channel, :joins], external_id)
assert 2 = get_count([:realtime, :rate_counter, :channel, :presence_events], external_id)
+ # (5 for each websocket)
assert 10 = get_count([:realtime, :rate_counter, :channel, :db_events], external_id)
assert 0 = get_count([:realtime, :rate_counter, :channel, :events], external_id)
end
- test "postgres changes error events", %{tenant: tenant} do
+ test "postgres changes error events", %{tenant: tenant, serializer: serializer} do
external_id = tenant.external_id
- {socket, _} = get_connection(tenant)
+ {socket, _} = get_connection(tenant, serializer)
config = %{broadcast: %{self: true}, postgres_changes: [%{event: "*", schema: "none"}]}
topic = "realtime:any"
@@ -2207,7 +2197,7 @@ defmodule Realtime.Integration.RtChannelTest do
assert_receive %Message{topic: ^topic, event: "system"}, 5000
# Wait for RateCounter to run
- Process.sleep(2000)
+ RateCounterHelper.tick_tenant_rate_counters!(tenant.external_id)
# Expected billed
# 1 joins due to one socket
@@ -2221,126 +2211,133 @@ defmodule Realtime.Integration.RtChannelTest do
end
end
- test "tracks and untracks properly channels", %{tenant: tenant} do
- assert [] = Tracker.list_pids()
+ describe "WAL bloat handling" do
+ setup %{tenant: tenant} do
+ topic = random_string()
+ {:ok, db_conn} = Database.connect(tenant, "realtime_test", :stop)
- {socket, _} = get_connection(tenant)
- config = %{broadcast: %{self: true}, private: false, presence: %{enabled: false}}
+ %{rows: [[max_wal_size]]} = Postgrex.query!(db_conn, "SHOW max_wal_size", [])
+ %{rows: [[wal_keep_size]]} = Postgrex.query!(db_conn, "SHOW wal_keep_size", [])
+ %{rows: [[max_slot_wal_keep_size]]} = Postgrex.query!(db_conn, "SHOW max_slot_wal_keep_size", [])
- topics =
- for _ <- 1..10 do
- topic = "realtime:#{random_string()}"
- :ok = WebsocketClient.join(socket, topic, %{config: config})
- assert_receive %Message{topic: ^topic, event: "phx_reply"}, 500
- topic
- end
+ assert max_wal_size == "32MB"
+ assert wal_keep_size == "32MB"
+ assert max_slot_wal_keep_size == "32MB"
- assert [{_pid, count}] = Tracker.list_pids()
- assert count == length(topics)
+ Postgrex.query!(db_conn, "CREATE TABLE IF NOT EXISTS wal_test (id INT, data TEXT)", [])
- for topic <- topics do
- :ok = WebsocketClient.leave(socket, topic, %{})
- assert_receive %Message{topic: ^topic, event: "phx_close"}, 500
- end
+ Postgrex.query!(
+ db_conn,
+ """
+ CREATE OR REPLACE FUNCTION wal_test_trigger_func() RETURNS TRIGGER AS $$
+ BEGIN
+ PERFORM realtime.send(json_build_object ('value', 'test' :: text)::jsonb, 'test', '#{topic}', false);
+ RETURN NULL;
+ END;
+ $$ LANGUAGE plpgsql;
+ """,
+ []
+ )
- # wait to trigger tracker
- assert_process_down(socket, 5000)
- assert [] = Tracker.list_pids()
- end
+ Postgrex.query!(db_conn, "DROP TRIGGER IF EXISTS wal_test_trigger ON wal_test", [])
- test "failed connections are present in tracker with counter counter lower than 0 so they are actioned on by tracker",
- %{tenant: tenant} do
- assert [] = Tracker.list_pids()
+ Postgrex.query!(
+ db_conn,
+ """
+ CREATE TRIGGER wal_test_trigger
+ AFTER INSERT OR UPDATE OR DELETE ON wal_test
+ FOR EACH ROW
+ EXECUTE FUNCTION wal_test_trigger_func()
+ """,
+ []
+ )
- {socket, _} = get_connection(tenant)
- config = %{broadcast: %{self: true}, private: true, presence: %{enabled: false}}
+ GenServer.stop(db_conn)
- for _ <- 1..10 do
- topic = "realtime:#{random_string()}"
- :ok = WebsocketClient.join(socket, topic, %{config: config})
- assert_receive %Message{topic: ^topic, event: "phx_reply", payload: %{"status" => "error"}}, 500
- end
+ on_exit(fn ->
+ {:ok, db_conn} = Database.connect(tenant, "realtime_test", :stop)
- assert [{_pid, count}] = Tracker.list_pids()
- assert count == 0
- end
+ Postgrex.query!(db_conn, "DROP TABLE IF EXISTS wal_test CASCADE", [])
+ end)
- test "failed connections but one succeeds properly tracks",
- %{tenant: tenant} do
- assert [] = Tracker.list_pids()
+ %{topic: topic}
+ end
- {socket, _} = get_connection(tenant)
- topic = "realtime:#{random_string()}"
+ test "track PID changes during WAL bloat creation", %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true}, private: false}
+ full_topic = "realtime:#{topic}"
- :ok =
- WebsocketClient.join(socket, topic, %{
- config: %{broadcast: %{self: true}, private: false, presence: %{enabled: false}}
- })
+ active_slot_query =
+ "SELECT active_pid FROM pg_replication_slots where active_pid is not null and slot_name = 'supabase_realtime_messages_replication_slot_'"
- assert_receive %Message{topic: ^topic, event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert [{_pid, count}] = Tracker.list_pids()
- assert count == 1
+ WebsocketClient.join(socket, full_topic, %{config: config})
- for _ <- 1..10 do
- topic = "realtime:#{random_string()}"
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
- :ok =
- WebsocketClient.join(socket, topic, %{
- config: %{broadcast: %{self: true}, private: true, presence: %{enabled: false}}
- })
+ assert Connect.ready?(tenant.external_id)
- assert_receive %Message{topic: ^topic, event: "phx_reply", payload: %{"status" => "error"}}, 500
- end
+ {:ok, db_conn} = Connect.lookup_or_start_connection(tenant.external_id)
- topic = "realtime:#{random_string()}"
+ original_connect_pid = Connect.whereis(tenant.external_id)
+ original_replication_pid = ReplicationConnection.whereis(tenant.external_id)
+ %{rows: [[original_db_pid]]} = Postgrex.query!(db_conn, active_slot_query, [])
- :ok =
- WebsocketClient.join(socket, topic, %{
- config: %{broadcast: %{self: true}, private: false, presence: %{enabled: false}}
- })
+ tasks =
+ for _ <- 1..5 do
+ Task.async(fn ->
+ {:ok, bloat_conn} = Database.connect(tenant, "realtime_bloat", :stop)
- assert_receive %Message{topic: ^topic, event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert [{_pid, count}] = Tracker.list_pids()
- assert count == 2
- end
+ Postgrex.transaction(bloat_conn, fn conn ->
+ Postgrex.query(conn, "INSERT INTO wal_test SELECT generate_series(1, 100000), repeat('x', 2000)", [])
+ {:error, "test"}
+ end)
- defp mode(%{mode: :distributed}) do
- tenant = Api.get_tenant_by_external_id("dev_tenant")
+ Process.exit(bloat_conn, :normal)
+ end)
+ end
- RateCounter.stop(tenant.external_id)
- :ets.delete_all_objects(Tracker.table_name())
+ Task.await_many(tasks, 20000)
- Connect.shutdown(tenant.external_id)
- # Sleeping so that syn can forget about this Connect process
- Process.sleep(100)
+ # Kill all pending transactions still running
+ Postgrex.query!(
+ db_conn,
+ "SELECT pg_terminate_backend(pid) from pg_stat_activity where application_name='realtime_bloat'",
+ []
+ )
- on_exit(fn ->
- Connect.shutdown(tenant.external_id)
- # Sleeping so that syn can forget about this Connect process
- Process.sleep(100)
- end)
+ # Does it recover?
+ assert Connect.ready?(tenant.external_id)
+ {:ok, db_conn} = Connect.lookup_or_start_connection(tenant.external_id)
+ Process.sleep(1000)
+ %{rows: [[new_db_pid]]} = Postgrex.query!(db_conn, active_slot_query, [])
- on_exit(fn -> Connect.shutdown(tenant.external_id) end)
- {:ok, node} = Clustered.start()
- region = Tenants.region(tenant)
- {:ok, db_conn} = :erpc.call(node, Connect, :connect, ["dev_tenant", region])
- assert Connect.ready?(tenant.external_id)
+ assert new_db_pid != original_db_pid
+ assert ^original_connect_pid = Connect.whereis(tenant.external_id)
+ assert original_replication_pid != ReplicationConnection.whereis(tenant.external_id)
- assert node(db_conn) == node
- %{db_conn: db_conn, node: node, tenant: tenant}
- end
+ # Check if socket is still connected
+ payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
+ WebsocketClient.send_event(socket, full_topic, "broadcast", payload)
+ assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^full_topic}, 500
- defp mode(_) do
- tenant = Containers.checkout_tenant(run_migrations: true)
- RateCounter.stop(tenant.external_id)
+ # Check if we are receiving the message from replication connection
+ Postgrex.query!(db_conn, "INSERT INTO wal_test VALUES (1, 'test')", [])
- :ets.delete_all_objects(Tracker.table_name())
- Realtime.Tenants.Connect.shutdown(tenant.external_id)
- # Sleeping so that syn can forget about this Connect process
- Process.sleep(100)
- {:ok, db_conn} = Connect.lookup_or_start_connection(tenant.external_id)
- assert Connect.ready?(tenant.external_id)
- %{db_conn: db_conn, tenant: tenant}
+ assert_receive %Message{
+ event: "broadcast",
+ payload: %{
+ "event" => "test",
+ "payload" => %{"value" => "test"},
+ "type" => "broadcast"
+ },
+ join_ref: nil,
+ ref: nil,
+ topic: ^full_topic
+ },
+ 5000
+ end
end
defp rls_context(%{tenant: tenant} = context) do
@@ -2399,15 +2396,16 @@ defmodule Realtime.Integration.RtChannelTest do
end
defp change_tenant_configuration(%Tenant{external_id: external_id}, limit, value) do
- external_id
- |> Realtime.Tenants.get_tenant_by_external_id()
- |> Realtime.Api.Tenant.changeset(%{limit => value})
- |> Realtime.Repo.update!()
+ tenant =
+ external_id
+ |> Realtime.Tenants.get_tenant_by_external_id()
+ |> Realtime.Api.Tenant.changeset(%{limit => value})
+ |> Realtime.Repo.update!()
- Realtime.Tenants.Cache.invalidate_tenant_cache(external_id)
+ Realtime.Tenants.Cache.update_cache(tenant)
end
- defp assert_process_down(pid, timeout \\ 100) do
+ defp assert_process_down(pid, timeout \\ 1000) do
ref = Process.monitor(pid)
assert_receive {:DOWN, ^ref, :process, ^pid, _reason}, timeout
end
diff --git a/test/integration/tests.ts b/test/integration/tests.ts
new file mode 100644
index 000000000..036255f17
--- /dev/null
+++ b/test/integration/tests.ts
@@ -0,0 +1,204 @@
+import { RealtimeClient } from "npm:@supabase/supabase-js@latest";
+import { sleep } from "https://deno.land/x/sleep/mod.ts";
+import { describe, it } from "jsr:@std/testing/bdd";
+import { assertEquals } from "jsr:@std/assert";
+import { deadline } from "jsr:@std/async/deadline";
+
+const withDeadline =
Promise>(fn: Fn, ms: number): Fn =>
+ ((...args) => deadline(fn(...args), ms)) as Fn;
+
+const url = "http://realtime-dev.localhost:4100/socket";
+const serviceRoleKey = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjIwNzU3NzYzODIsInJlZiI6IjEyNy4wLjAuMSIsInJvbGUiOiJzZXJ2aWNlX3JvbGUiLCJpYXQiOjE3NjA3NzYzODJ9.nupH8pnrOTgK9Xaq8-D4Ry-yQ-PnlXEagTVywQUJVIE"
+const apiKey = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjIwNzU2NjE3MjEsInJlZiI6IjEyNy4wLjAuMSIsInJvbGUiOiJhdXRoZW50aWNhdGVkIiwiaWF0IjoxNzYwNjYxNzIxfQ.PxpBoelC9vWQ2OVhmwKBUDEIKgX7MpgSdsnmXw7UdYk";
+
+const realtimeV1 = { vsn: '1.0.0', params: { apikey: apiKey } , heartbeatIntervalMs: 5000, timeout: 5000 };
+const realtimeV2 = { vsn: '2.0.0', params: { apikey: apiKey } , heartbeatIntervalMs: 5000, timeout: 5000 };
+const realtimeServiceRole = { vsn: '2.0.0', logger: console.log, params: { apikey: serviceRoleKey } , heartbeatIntervalMs: 5000, timeout: 5000 };
+
+let clientV1: RealtimeClient | null;
+let clientV2: RealtimeClient | null;
+
+describe("broadcast extension", { sanitizeOps: false, sanitizeResources: false }, () => {
+ it("users with different versions can receive self broadcast", withDeadline(async () => {
+ clientV1 = new RealtimeClient(url, realtimeV1)
+ clientV2 = new RealtimeClient(url, realtimeV2)
+ let resultV1 = null;
+ let resultV2 = null;
+ let event = crypto.randomUUID();
+ let topic = "topic:" + crypto.randomUUID();
+ let expectedPayload = { message: crypto.randomUUID() };
+ const config = { config: { broadcast: { ack: true, self: true } } };
+
+ const channelV1 = clientV1
+ .channel(topic, config)
+ .on("broadcast", { event }, ({ payload }) => (resultV1 = payload))
+ .subscribe();
+
+ const channelV2 = clientV2
+ .channel(topic, config)
+ .on("broadcast", { event }, ({ payload }) => (resultV2 = payload))
+ .subscribe();
+
+ while (channelV1.state != "joined" || channelV2.state != "joined") await sleep(0.2);
+
+ // Send from V1 client - both should receive
+ await channelV1.send({
+ type: "broadcast",
+ event,
+ payload: expectedPayload,
+ });
+
+ while (resultV1 == null || resultV2 == null) await sleep(0.2);
+
+ assertEquals(resultV1, expectedPayload);
+ assertEquals(resultV2, expectedPayload);
+
+ // Reset results for second test
+ resultV1 = null;
+ resultV2 = null;
+ let expectedPayload2 = { message: crypto.randomUUID() };
+
+ // Send from V2 client - both should receive
+ await channelV2.send({
+ type: "broadcast",
+ event,
+ payload: expectedPayload2,
+ });
+
+ while (resultV1 == null || resultV2 == null) await sleep(0.2);
+
+ assertEquals(resultV1, expectedPayload2);
+ assertEquals(resultV2, expectedPayload2);
+
+ await channelV1.unsubscribe();
+ await channelV2.unsubscribe();
+
+ await stopClient(clientV1);
+ await stopClient(clientV2);
+ clientV1 = null;
+ clientV2 = null;
+ }, 5000));
+
+ it("v2 can send/receive binary payload", withDeadline(async () => {
+ clientV2 = new RealtimeClient(url, realtimeV2)
+ let result = null;
+ let event = crypto.randomUUID();
+ let topic = "topic:" + crypto.randomUUID();
+ const expectedPayload = new ArrayBuffer(2);
+ const uint8 = new Uint8Array(expectedPayload); // View the buffer as unsigned 8-bit integers
+ uint8[0] = 125;
+ uint8[1] = 255;
+
+ const config = { config: { broadcast: { ack: true, self: true } } };
+
+ const channelV2 = clientV2
+ .channel(topic, config)
+ .on("broadcast", { event }, ({ payload }) => (result = payload))
+ .subscribe();
+
+ while (channelV2.state != "joined") await sleep(0.2);
+
+ await channelV2.send({
+ type: "broadcast",
+ event,
+ payload: expectedPayload,
+ });
+
+ while (result == null) await sleep(0.2);
+
+ assertEquals(result, expectedPayload);
+
+ await channelV2.unsubscribe();
+
+ await stopClient(clientV2);
+ clientV2 = null;
+ }, 5000));
+
+ it("users with different versions can receive broadcasts from endpoint", withDeadline(async () => {
+ clientV1 = new RealtimeClient(url, realtimeV1)
+ clientV2 = new RealtimeClient(url, realtimeV2)
+ let resultV1 = null;
+ let resultV2 = null;
+ let event = crypto.randomUUID();
+ let topic = "topic:" + crypto.randomUUID();
+ let expectedPayload = { message: crypto.randomUUID() };
+ const config = { config: { broadcast: { ack: true, self: true } } };
+
+ const channelV1 = clientV1
+ .channel(topic, config)
+ .on("broadcast", { event }, ({ payload }) => (resultV1 = payload))
+ .subscribe();
+
+ const channelV2 = clientV2
+ .channel(topic, config)
+ .on("broadcast", { event }, ({ payload }) => (resultV2 = payload))
+ .subscribe();
+
+ while (channelV1.state != "joined" || channelV2.state != "joined") await sleep(0.2);
+
+ // Send from unsubscribed channel - both should receive
+ new RealtimeClient(url, realtimeServiceRole).channel(topic, config).httpSend(event, expectedPayload);
+
+ while (resultV1 == null || resultV2 == null) await sleep(0.2);
+
+ assertEquals(resultV1, expectedPayload);
+ assertEquals(resultV2, expectedPayload);
+
+ await channelV1.unsubscribe();
+ await channelV2.unsubscribe();
+
+ await stopClient(clientV1);
+ await stopClient(clientV2);
+ clientV1 = null;
+ clientV2 = null;
+ }, 5000));
+});
+
+// describe("presence extension", () => {
+// it("user is able to receive presence updates", async () => {
+// let result: any = [];
+// let error = null;
+// let topic = "topic:" + crypto.randomUUID();
+// let keyV1 = "key V1";
+// let keyV2 = "key V2";
+//
+// const configV1 = { config: { presence: { keyV1 } } };
+// const configV2 = { config: { presence: { keyV1 } } };
+//
+// const channelV1 = clientV1
+// .channel(topic, configV1)
+// .on("presence", { event: "join" }, ({ key, newPresences }) =>
+// result.push({ key, newPresences })
+// )
+// .subscribe();
+//
+// const channelV2 = clientV2
+// .channel(topic, configV2)
+// .on("presence", { event: "join" }, ({ key, newPresences }) =>
+// result.push({ key, newPresences })
+// )
+// .subscribe();
+//
+// while (channelV1.state != "joined" || channelV2.state != "joined") await sleep(0.2);
+//
+// const resV1 = await channelV1.track({ key: keyV1 });
+// const resV2 = await channelV2.track({ key: keyV2 });
+//
+// if (resV1 == "timed out" || resV2 == "timed out") error = resV1 || resV2;
+//
+// sleep(2.2);
+//
+// // FIXME write assertions
+// console.log(result)
+// let presences = result[0].newPresences[0];
+// assertEquals(result[0].key, keyV1);
+// assertEquals(presences.message, message);
+// assertEquals(error, null);
+// });
+// });
+
+async function stopClient(client: RealtimeClient | null) {
+ if (client) {
+ await client.removeAllChannels();
+ }
+}
diff --git a/test/integration/tracker_test.exs b/test/integration/tracker_test.exs
new file mode 100644
index 000000000..32b73f65a
--- /dev/null
+++ b/test/integration/tracker_test.exs
@@ -0,0 +1,101 @@
+defmodule Integration.TrackerTest do
+  # async: false because these tests mutate the shared Tracker ETS table
+ use RealtimeWeb.ConnCase, async: false
+
+ alias RealtimeWeb.RealtimeChannel.Tracker
+ alias Phoenix.Socket.Message
+ alias Realtime.Tenants.Connect
+ alias Realtime.Integration.WebsocketClient
+
+ setup do
+ tenant = Containers.checkout_tenant(run_migrations: true)
+ :ets.delete_all_objects(Tracker.table_name())
+
+ {:ok, db_conn} = Connect.lookup_or_start_connection(tenant.external_id)
+ assert Connect.ready?(tenant.external_id)
+ %{db_conn: db_conn, tenant: tenant}
+ end
+
+ test "tracks and untracks properly channels", %{tenant: tenant} do
+ {socket, _} = get_connection(tenant)
+ config = %{broadcast: %{self: true}, private: false, presence: %{enabled: false}}
+
+ topics =
+ for _ <- 1..10 do
+ topic = "realtime:#{random_string()}"
+ :ok = WebsocketClient.join(socket, topic, %{config: config})
+ assert_receive %Message{topic: ^topic, event: "phx_reply"}, 500
+ topic
+ end
+
+ for topic <- topics do
+ :ok = WebsocketClient.leave(socket, topic, %{})
+ assert_receive %Message{topic: ^topic, event: "phx_close"}, 500
+ end
+
+ start_supervised!({Tracker, check_interval_in_ms: 100})
+    # wait for the tracker's check interval to fire and close the idle socket
+ assert_process_down(socket, 1000)
+ end
+
+ test "failed connections are present in tracker with counter lower than 0 so they are actioned on by tracker", %{
+ tenant: tenant
+ } do
+ assert [] = Tracker.list_pids()
+
+ {socket, _} = get_connection(tenant)
+ config = %{broadcast: %{self: true}, private: true, presence: %{enabled: false}}
+
+ for _ <- 1..10 do
+ topic = "realtime:#{random_string()}"
+ :ok = WebsocketClient.join(socket, topic, %{config: config})
+ assert_receive %Message{topic: ^topic, event: "phx_reply", payload: %{"status" => "error"}}, 500
+ end
+
+ assert [{_pid, count}] = Tracker.list_pids()
+ assert count == 0
+ end
+
+ test "failed connections but one succeeds properly tracks", %{tenant: tenant} do
+ assert [] = Tracker.list_pids()
+
+ {socket, _} = get_connection(tenant)
+ topic = "realtime:#{random_string()}"
+
+ :ok =
+ WebsocketClient.join(socket, topic, %{
+ config: %{broadcast: %{self: true}, private: false, presence: %{enabled: false}}
+ })
+
+ assert_receive %Message{topic: ^topic, event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert [{_pid, count}] = Tracker.list_pids()
+ assert count == 1
+
+ for _ <- 1..10 do
+ topic = "realtime:#{random_string()}"
+
+ :ok =
+ WebsocketClient.join(socket, topic, %{
+ config: %{broadcast: %{self: true}, private: true, presence: %{enabled: false}}
+ })
+
+ assert_receive %Message{topic: ^topic, event: "phx_reply", payload: %{"status" => "error"}}, 500
+ end
+
+ topic = "realtime:#{random_string()}"
+
+ :ok =
+ WebsocketClient.join(socket, topic, %{
+ config: %{broadcast: %{self: true}, private: false, presence: %{enabled: false}}
+ })
+
+ assert_receive %Message{topic: ^topic, event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert [{_pid, count}] = Tracker.list_pids()
+ assert count == 2
+ end
+
+ defp assert_process_down(pid, timeout) do
+ ref = Process.monitor(pid)
+ assert_receive {:DOWN, ^ref, :process, ^pid, _reason}, timeout
+ end
+end
diff --git a/test/realtime/api_test.exs b/test/realtime/api_test.exs
index 1c4a816b0..06d554110 100644
--- a/test/realtime/api_test.exs
+++ b/test/realtime/api_test.exs
@@ -1,5 +1,5 @@
defmodule Realtime.ApiTest do
- use Realtime.DataCase, async: false
+ use Realtime.DataCase, async: true
use Mimic
@@ -13,22 +13,24 @@ defmodule Realtime.ApiTest do
@db_conf Application.compile_env(:realtime, Realtime.Repo)
- setup do
- tenant1 = Containers.checkout_tenant(run_migrations: true)
- tenant2 = Containers.checkout_tenant(run_migrations: true)
- Api.update_tenant(tenant1, %{max_concurrent_users: 10_000_000})
- Api.update_tenant(tenant2, %{max_concurrent_users: 20_000_000})
-
- %{tenants: Api.list_tenants(), tenant: tenant1}
+ defp create_tenants(_) do
+ tenant1 = tenant_fixture(%{max_concurrent_users: 10_000_000})
+ tenant2 = tenant_fixture(%{max_concurrent_users: 20_000_000})
+ dev_tenant = Realtime.Api.get_tenant_by_external_id("dev_tenant")
+ %{tenants: [tenant1, tenant2, dev_tenant]}
end
describe "list_tenants/0" do
+ setup [:create_tenants]
+
test "returns all tenants", %{tenants: tenants} do
assert Enum.sort(Api.list_tenants()) == Enum.sort(tenants)
end
end
describe "list_tenants/1" do
+ setup [:create_tenants]
+
test "list_tenants/1 returns filtered tenants", %{tenants: tenants} do
assert hd(Api.list_tenants(search: hd(tenants).external_id)) == hd(tenants)
@@ -38,6 +40,8 @@ defmodule Realtime.ApiTest do
end
describe "get_tenant!/1" do
+ setup [:create_tenants]
+
test "returns the tenant with given id", %{tenants: [tenant | _]} do
result = tenant.id |> Api.get_tenant!() |> Map.delete(:extensions)
expected = tenant |> Map.delete(:extensions)
@@ -51,6 +55,10 @@ defmodule Realtime.ApiTest do
external_id = random_string()
+ expect(Realtime.Tenants.Cache, :global_cache_update, fn tenant ->
+ assert tenant.external_id == external_id
+ end)
+
valid_attrs = %{
external_id: external_id,
name: external_id,
@@ -85,11 +93,14 @@ defmodule Realtime.ApiTest do
end
test "invalid data returns error changeset" do
+ reject(&Realtime.Tenants.Cache.global_cache_update/1)
assert {:error, %Ecto.Changeset{}} = Api.create_tenant(%{external_id: nil, jwt_secret: nil, name: nil})
end
end
- describe "get_tenant_by_external_id/1" do
+ describe "get_tenant_by_external_id/2" do
+ setup [:create_tenants]
+
test "fetch by external id", %{tenants: [tenant | _]} do
%Tenant{extensions: [%Extensions{} = extension]} =
Api.get_tenant_by_external_id(tenant.external_id)
@@ -98,56 +109,78 @@ defmodule Realtime.ApiTest do
password = extension.settings["db_password"]
assert ^password = "v1QVng3N+pZd/0AEObABwg=="
end
+
+ test "fetch by external id using replica", %{tenants: [tenant | _]} do
+ %Tenant{extensions: [%Extensions{} = extension]} =
+ Api.get_tenant_by_external_id(tenant.external_id, use_replica?: true)
+
+ assert Map.has_key?(extension.settings, "db_password")
+ password = extension.settings["db_password"]
+ assert ^password = "v1QVng3N+pZd/0AEObABwg=="
+ end
+
+ test "fetch by external id using no replica", %{tenants: [tenant | _]} do
+ %Tenant{extensions: [%Extensions{} = extension]} =
+ Api.get_tenant_by_external_id(tenant.external_id, use_replica?: false)
+
+ assert Map.has_key?(extension.settings, "db_password")
+ password = extension.settings["db_password"]
+ assert ^password = "v1QVng3N+pZd/0AEObABwg=="
+ end
end
- describe "update_tenant/2" do
- test "valid data updates the tenant", %{tenant: tenant} do
+ describe "update_tenant_by_external_id/2" do
+ setup [:create_tenants]
+
+ test "valid data updates the tenant using external_id", %{tenants: [tenant | _]} do
update_attrs = %{
external_id: tenant.external_id,
jwt_secret: "some updated jwt_secret",
name: "some updated name"
}
- assert {:ok, %Tenant{} = tenant} = Api.update_tenant(tenant, update_attrs)
+ assert {:ok, %Tenant{} = tenant} = Api.update_tenant_by_external_id(tenant.external_id, update_attrs)
assert tenant.external_id == tenant.external_id
assert tenant.jwt_secret == Crypto.encrypt!("some updated jwt_secret")
assert tenant.name == "some updated name"
end
- test "invalid data returns error changeset", %{tenant: tenant} do
- assert {:error, %Ecto.Changeset{}} = Api.update_tenant(tenant, %{external_id: nil, jwt_secret: nil, name: nil})
+ test "invalid data returns error changeset", %{tenants: [tenant | _]} do
+ assert {:error, %Ecto.Changeset{}} =
+ Api.update_tenant_by_external_id(tenant.external_id, %{external_id: nil, jwt_secret: nil, name: nil})
end
- test "valid data and jwks change will send disconnect event", %{tenant: tenant} do
+ test "valid data and jwks change will send disconnect event", %{tenants: [tenant | _]} do
:ok = Phoenix.PubSub.subscribe(Realtime.PubSub, "realtime:operations:" <> tenant.external_id)
- assert {:ok, %Tenant{}} = Api.update_tenant(tenant, %{jwt_jwks: %{keys: ["test"]}})
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{jwt_jwks: %{keys: ["test"]}})
assert_receive :disconnect, 500
end
- test "valid data and jwt_secret change will send disconnect event", %{tenant: tenant} do
+ test "valid data and jwt_secret change will send disconnect event", %{tenants: [tenant | _]} do
:ok = Phoenix.PubSub.subscribe(Realtime.PubSub, "realtime:operations:" <> tenant.external_id)
- assert {:ok, %Tenant{}} = Api.update_tenant(tenant, %{jwt_secret: "potato"})
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{jwt_secret: "potato"})
assert_receive :disconnect, 500
end
- test "valid data and suspend change will send disconnect event", %{tenant: tenant} do
+ test "valid data and suspend change will send disconnect event", %{tenants: [tenant | _]} do
:ok = Phoenix.PubSub.subscribe(Realtime.PubSub, "realtime:operations:" <> tenant.external_id)
- assert {:ok, %Tenant{}} = Api.update_tenant(tenant, %{suspend: true})
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{suspend: true})
assert_receive :disconnect, 500
end
- test "valid data but not updating jwt_secret or jwt_jwks won't send event", %{tenant: tenant} do
+ test "valid data but not updating jwt_secret or jwt_jwks won't send event", %{tenants: [tenant | _]} do
:ok = Phoenix.PubSub.subscribe(Realtime.PubSub, "realtime:operations:" <> tenant.external_id)
- assert {:ok, %Tenant{}} = Api.update_tenant(tenant, %{max_events_per_second: 100})
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{max_events_per_second: 100})
refute_receive :disconnect, 500
end
- test "valid data and jwt_secret change will restart the database connection", %{tenant: tenant} do
+ test "valid data and jwt_secret change will restart the database connection" do
+ tenant = Containers.checkout_tenant(run_migrations: true)
{:ok, old_pid} = Connect.lookup_or_start_connection(tenant.external_id)
Process.monitor(old_pid)
- assert {:ok, %Tenant{}} = Api.update_tenant(tenant, %{jwt_secret: "potato"})
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{jwt_secret: "potato"})
assert_receive {:DOWN, _, :process, ^old_pid, :shutdown}, 500
refute Process.alive?(old_pid)
Process.sleep(100)
@@ -155,28 +188,36 @@ defmodule Realtime.ApiTest do
assert %Postgrex.Result{} = Postgrex.query!(new_pid, "SELECT 1", [])
end
- test "valid data and suspend change will restart the database connection", %{tenant: tenant} do
+ test "valid data and suspend change will restart the database connection" do
+ tenant = Containers.checkout_tenant(run_migrations: true)
{:ok, old_pid} = Connect.lookup_or_start_connection(tenant.external_id)
Process.monitor(old_pid)
- assert {:ok, %Tenant{}} = Api.update_tenant(tenant, %{suspend: true})
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{suspend: true})
assert_receive {:DOWN, _, :process, ^old_pid, :shutdown}, 500
refute Process.alive?(old_pid)
Process.sleep(100)
assert {:error, :tenant_suspended} = Connect.lookup_or_start_connection(tenant.external_id)
end
- test "valid data and tenant data change will not restart the database connection", %{tenant: tenant} do
+ test "valid data and tenant data change will not restart the database connection" do
+ tenant = Containers.checkout_tenant(run_migrations: true)
+
+ expect(Realtime.Tenants.Cache, :global_cache_update, fn tenant ->
+ assert tenant.max_concurrent_users == 101
+ end)
+
{:ok, old_pid} = Connect.lookup_or_start_connection(tenant.external_id)
- assert {:ok, %Tenant{}} = Api.update_tenant(tenant, %{max_concurrent_users: 100})
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{max_concurrent_users: 101})
refute_receive {:DOWN, _, :process, ^old_pid, :shutdown}, 500
assert Process.alive?(old_pid)
assert {:ok, new_pid} = Connect.lookup_or_start_connection(tenant.external_id)
assert old_pid == new_pid
end
- test "valid data and extensions data change will restart the database connection", %{tenant: tenant} do
+ test "valid data and extensions data change will restart the database connection" do
+ tenant = Containers.checkout_tenant(run_migrations: true)
config = Realtime.Database.from_tenant(tenant, "realtime_test", :stop)
extensions = [
@@ -200,7 +241,7 @@ defmodule Realtime.ApiTest do
{:ok, old_pid} = Connect.lookup_or_start_connection(tenant.external_id)
Process.monitor(old_pid)
- assert {:ok, %Tenant{}} = Api.update_tenant(tenant, %{extensions: extensions})
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{extensions: extensions})
assert_receive {:DOWN, _, :process, ^old_pid, :shutdown}, 500
refute Process.alive?(old_pid)
Process.sleep(100)
@@ -208,22 +249,17 @@ defmodule Realtime.ApiTest do
assert %Postgrex.Result{} = Postgrex.query!(new_pid, "SELECT 1", [])
end
- test "valid data and change to tenant data will refresh cache", %{tenant: tenant} do
- assert {:ok, %Tenant{}} = Api.update_tenant(tenant, %{name: "new_name"})
- assert %Tenant{name: "new_name"} = Realtime.Tenants.Cache.get_tenant_by_external_id(tenant.external_id)
- end
+ test "valid data and change to tenant data will refresh cache", %{tenants: [tenant | _]} do
+ expect(Realtime.Tenants.Cache, :global_cache_update, fn tenant ->
+ assert tenant.name == "new_name"
+ end)
- test "valid data and no changes to tenant will not refresh cache", %{tenant: tenant} do
- reject(&Realtime.Tenants.Cache.get_tenant_by_external_id/1)
- assert {:ok, %Tenant{}} = Api.update_tenant(tenant, %{name: tenant.name})
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{name: "new_name"})
end
- end
- describe "delete_tenant/1" do
- test "deletes the tenant" do
- tenant = tenant_fixture()
- assert {:ok, %Tenant{}} = Api.delete_tenant(tenant)
- assert_raise Ecto.NoResultsError, fn -> Api.get_tenant!(tenant.id) end
+ test "valid data and no changes to tenant will not refresh cache", %{tenants: [tenant | _]} do
+ reject(&Realtime.Tenants.Cache.global_cache_update/1)
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{name: tenant.name})
end
end
@@ -236,11 +272,9 @@ defmodule Realtime.ApiTest do
end
end
- test "list_extensions/1 ", %{tenants: tenants} do
- assert length(Api.list_extensions()) == length(tenants)
- end
-
describe "preload_counters/1" do
+ setup [:create_tenants]
+
test "preloads counters for a given tenant ", %{tenants: [tenant | _]} do
tenant = Repo.reload!(tenant)
assert Api.preload_counters(nil) == nil
@@ -256,6 +290,7 @@ defmodule Realtime.ApiTest do
end
describe "rename_settings_field/2" do
+ @tag skip: "** (Postgrex.Error) ERROR 0A000 (feature_not_supported) cached plan must not change result type"
test "renames setting fields" do
tenant = tenant_fixture()
Api.rename_settings_field("poll_interval_ms", "poll_interval")
@@ -340,4 +375,18 @@ defmodule Realtime.ApiTest do
refute TestRequiresRestartingDbConnection.check(changeset)
end
end
+
+ describe "update_migrations_ran/1" do
+ test "updates migrations_ran to the count of all migrations" do
+ tenant = tenant_fixture(%{migrations_ran: 0})
+
+ expect(Realtime.Tenants.Cache, :global_cache_update, fn tenant ->
+ assert tenant.migrations_ran == 1
+ :ok
+ end)
+
+ assert {:ok, tenant} = Api.update_migrations_ran(tenant.external_id, 1)
+ assert tenant.migrations_ran == 1
+ end
+ end
end
diff --git a/test/realtime/database_distributed_test.exs b/test/realtime/database_distributed_test.exs
new file mode 100644
index 000000000..43b40743e
--- /dev/null
+++ b/test/realtime/database_distributed_test.exs
@@ -0,0 +1,100 @@
+defmodule Realtime.DatabaseDistributedTest do
+ # async: false due to usage of Clustered + dev_tenant
+ use Realtime.DataCase, async: false
+
+ import ExUnit.CaptureLog
+
+ alias Realtime.Database
+ alias Realtime.Rpc
+ alias Realtime.Tenants.Connect
+
+ doctest Realtime.Database
+ def handle_telemetry(event, metadata, content, pid: pid), do: send(pid, {event, metadata, content})
+
+ setup do
+ tenant = Containers.checkout_tenant()
+ :telemetry.attach(__MODULE__, [:realtime, :database, :transaction], &__MODULE__.handle_telemetry/4, pid: self())
+
+ on_exit(fn -> :telemetry.detach(__MODULE__) end)
+
+ %{tenant: tenant}
+ end
+
+ @aux_mod (quote do
+ defmodule DatabaseAux do
+ def checker(transaction_conn) do
+ Postgrex.query!(transaction_conn, "SELECT 1", [])
+ end
+
+ def error(transaction_conn) do
+ Postgrex.query!(transaction_conn, "SELECT 1/0", [])
+ end
+
+ def exception(_) do
+ raise RuntimeError, "💣"
+ end
+ end
+ end)
+
+ Code.eval_quoted(@aux_mod)
+
+ describe "transaction/1 in clustered mode" do
+ setup do
+ Connect.shutdown("dev_tenant")
+ # Waiting for :syn to "unregister" if the Connect process was up
+ Process.sleep(100)
+ :ok
+ end
+
+ test "success call returns output" do
+ {:ok, node} = Clustered.start(@aux_mod)
+ {:ok, db_conn} = Rpc.call(node, Connect, :connect, ["dev_tenant", "us-east-1"])
+ assert node(db_conn) == node
+ assert {:ok, %Postgrex.Result{rows: [[1]]}} = Database.transaction(db_conn, &DatabaseAux.checker/1)
+ end
+
+ test "handles database errors" do
+ metadata = [external_id: "123", project: "123"]
+ {:ok, node} = Clustered.start(@aux_mod)
+ {:ok, db_conn} = Rpc.call(node, Connect, :connect, ["dev_tenant", "us-east-1"])
+ assert node(db_conn) == node
+
+ assert capture_log(fn ->
+ assert {:error, %Postgrex.Error{}} = Database.transaction(db_conn, &DatabaseAux.error/1, [], metadata)
+ # We have to wait for logs to be relayed to this node
+ Process.sleep(100)
+ end) =~ "project=123 external_id=123 [error] ErrorExecutingTransaction:"
+ end
+
+ test "handles exception" do
+ metadata = [external_id: "123", project: "123"]
+ {:ok, node} = Clustered.start(@aux_mod)
+ {:ok, db_conn} = Rpc.call(node, Connect, :connect, ["dev_tenant", "us-east-1"])
+ assert node(db_conn) == node
+
+ assert capture_log(fn ->
+ assert {:error, %RuntimeError{}} = Database.transaction(db_conn, &DatabaseAux.exception/1, [], metadata)
+ # We have to wait for logs to be relayed to this node
+ Process.sleep(100)
+ end) =~ "project=123 external_id=123 [error] ErrorExecutingTransaction:"
+ end
+
+ test "db process is not alive anymore" do
+ metadata = [external_id: "123", project: "123", tenant_id: "123"]
+ {:ok, node} = Clustered.start(@aux_mod)
+ # Grab a remote pid that will not exist. :erpc uses a new process to perform the call.
+ # Once it has returned the process is not alive anymore
+
+ pid = Rpc.call(node, :erlang, :self, [])
+ assert node(pid) == node
+
+ assert capture_log(fn ->
+ assert {:error, {:exit, {:noproc, {DBConnection.Holder, :checkout, [^pid, []]}}}} =
+ Database.transaction(pid, &DatabaseAux.checker/1, [], metadata)
+
+ # We have to wait for logs to be relayed to this node
+ Process.sleep(100)
+ end) =~ "project=123 external_id=123 [error] ErrorExecutingTransaction:"
+ end
+ end
+end
diff --git a/test/realtime/database_test.exs b/test/realtime/database_test.exs
index f48de14b6..f8e8c8b86 100644
--- a/test/realtime/database_test.exs
+++ b/test/realtime/database_test.exs
@@ -1,12 +1,9 @@
defmodule Realtime.DatabaseTest do
- # async: false due to usage of Clustered
- use Realtime.DataCase, async: false
+ use Realtime.DataCase, async: true
import ExUnit.CaptureLog
alias Realtime.Database
- alias Realtime.Rpc
- alias Realtime.Tenants.Connect
doctest Realtime.Database
def handle_telemetry(event, metadata, content, pid: pid), do: send(pid, {event, metadata, content})
@@ -48,8 +45,7 @@ defmodule Realtime.DatabaseTest do
# Connection limit for docker tenant db is 100
@tag db_pool: 50,
- subs_pool_size: 21,
- subcriber_pool_size: 33
+ subs_pool_size: 73
test "restricts connection if tenant database cannot receive more connections based on tenant pool",
%{tenant: tenant} do
assert capture_log(fn ->
@@ -215,84 +211,6 @@ defmodule Realtime.DatabaseTest do
end
end
- @aux_mod (quote do
- defmodule DatabaseAux do
- def checker(transaction_conn) do
- Postgrex.query!(transaction_conn, "SELECT 1", [])
- end
-
- def error(transaction_conn) do
- Postgrex.query!(transaction_conn, "SELECT 1/0", [])
- end
-
- def exception(_) do
- raise RuntimeError, "💣"
- end
- end
- end)
-
- Code.eval_quoted(@aux_mod)
-
- describe "transaction/1 in clustered mode" do
- setup do
- Connect.shutdown("dev_tenant")
- # Waiting for :syn to "unregister" if the Connect process was up
- Process.sleep(100)
- :ok
- end
-
- test "success call returns output" do
- {:ok, node} = Clustered.start(@aux_mod)
- {:ok, db_conn} = Rpc.call(node, Connect, :connect, ["dev_tenant", "us-east-1"])
- assert node(db_conn) == node
- assert {:ok, %Postgrex.Result{rows: [[1]]}} = Database.transaction(db_conn, &DatabaseAux.checker/1)
- end
-
- test "handles database errors" do
- metadata = [external_id: "123", project: "123"]
- {:ok, node} = Clustered.start(@aux_mod)
- {:ok, db_conn} = Rpc.call(node, Connect, :connect, ["dev_tenant", "us-east-1"])
- assert node(db_conn) == node
-
- assert capture_log(fn ->
- assert {:error, %Postgrex.Error{}} = Database.transaction(db_conn, &DatabaseAux.error/1, [], metadata)
- # We have to wait for logs to be relayed to this node
- Process.sleep(100)
- end) =~ "project=123 external_id=123 [error] ErrorExecutingTransaction:"
- end
-
- test "handles exception" do
- metadata = [external_id: "123", project: "123"]
- {:ok, node} = Clustered.start(@aux_mod)
- {:ok, db_conn} = Rpc.call(node, Connect, :connect, ["dev_tenant", "us-east-1"])
- assert node(db_conn) == node
-
- assert capture_log(fn ->
- assert {:error, %RuntimeError{}} = Database.transaction(db_conn, &DatabaseAux.exception/1, [], metadata)
- # We have to wait for logs to be relayed to this node
- Process.sleep(100)
- end) =~ "project=123 external_id=123 [error] ErrorExecutingTransaction:"
- end
-
- test "db process is not alive anymore" do
- metadata = [external_id: "123", project: "123", tenant_id: "123"]
- {:ok, node} = Clustered.start(@aux_mod)
- # Grab a remote pid that will not exist. :erpc uses a new process to perform the call.
- # Once it has returned the process is not alive anymore
-
- pid = Rpc.call(node, :erlang, :self, [])
- assert node(pid) == node
-
- assert capture_log(fn ->
- assert {:error, {:exit, {:noproc, {DBConnection.Holder, :checkout, [^pid, []]}}}} =
- Database.transaction(pid, &DatabaseAux.checker/1, [], metadata)
-
- # We have to wait for logs to be relayed to this node
- Process.sleep(100)
- end) =~ "project=123 external_id=123 [error] ErrorExecutingTransaction:"
- end
- end
-
describe "pool_size_by_application_name/2" do
test "returns the number of connections per application name" do
assert Database.pool_size_by_application_name("realtime_connect", %{}) == 1
@@ -409,6 +327,6 @@ defmodule Realtime.DatabaseTest do
put_in(extension, ["settings", "db_port"], db_port)
]
- Realtime.Api.update_tenant(tenant, %{extensions: extensions})
+ Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{extensions: extensions})
end
end
diff --git a/test/realtime/extensions/cdc_rls/cdc_rls_test.exs b/test/realtime/extensions/cdc_rls/cdc_rls_test.exs
index 5f341c134..77c54e4ae 100644
--- a/test/realtime/extensions/cdc_rls/cdc_rls_test.exs
+++ b/test/realtime/extensions/cdc_rls/cdc_rls_test.exs
@@ -1,7 +1,7 @@
defmodule Realtime.Extensions.CdcRlsTest do
# async: false due to usage of dev_tenant
# Also global mimic mock
- use RealtimeWeb.ChannelCase, async: false
+ use Realtime.DataCase, async: false
use Mimic
import ExUnit.CaptureLog
@@ -9,6 +9,7 @@ defmodule Realtime.Extensions.CdcRlsTest do
setup :set_mimic_global
alias Extensions.PostgresCdcRls
+ alias Extensions.PostgresCdcRls.Subscriptions
alias PostgresCdcRls.SubscriptionManager
alias Postgrex
alias Realtime.Api
@@ -24,76 +25,35 @@ defmodule Realtime.Extensions.CdcRlsTest do
setup do
tenant = Containers.checkout_tenant(run_migrations: true)
- {:ok, conn} = Database.connect(tenant, "realtime_test")
-
- Database.transaction(conn, fn db_conn ->
- queries = [
- "drop table if exists public.test",
- "drop publication if exists supabase_realtime_test",
- "create sequence if not exists test_id_seq;",
- """
- create table if not exists "public"."test" (
- "id" int4 not null default nextval('test_id_seq'::regclass),
- "details" text,
- primary key ("id"));
- """,
- "grant all on table public.test to anon;",
- "grant all on table public.test to postgres;",
- "grant all on table public.test to authenticated;",
- "create publication supabase_realtime_test for all tables"
- ]
-
- Enum.each(queries, &Postgrex.query!(db_conn, &1, []))
- end)
-
%Tenant{extensions: extensions, external_id: external_id} = tenant
postgres_extension = PostgresCdc.filter_settings("postgres_cdc_rls", extensions)
args = Map.put(postgres_extension, "id", external_id)
- pg_change_params = [
- %{
- id: UUID.uuid1(),
- params: %{"event" => "*", "schema" => "public"},
- channel_pid: self(),
- claims: %{
- "exp" => System.system_time(:second) + 100_000,
- "iat" => 0,
- "ref" => "127.0.0.1",
- "role" => "anon"
- }
- }
- ]
-
- ids =
- Enum.map(pg_change_params, fn %{id: id, params: params} ->
- {UUID.string_to_binary!(id), :erlang.phash2(params)}
- end)
-
- topic = "realtime:test"
- serializer = Phoenix.Socket.V1.JSONSerializer
-
- subscription_metadata = {:subscriber_fastlane, self(), serializer, ids, topic, external_id, true}
- metadata = [metadata: subscription_metadata]
- :ok = PostgresCdc.subscribe(PostgresCdcRls, pg_change_params, external_id, metadata)
+ pg_change_params = pubsub_subscribe(external_id)
+ RealtimeWeb.Endpoint.subscribe(Realtime.Syn.PostgresCdc.syn_topic(tenant.external_id))
# First time it will return nil
PostgresCdcRls.handle_connect(args)
# Wait for it to start
- Process.sleep(3000)
+ assert_receive %{event: "ready"}, 1000
+
+ on_exit(fn -> PostgresCdcRls.handle_stop(external_id, 10_000) end)
{:ok, response} = PostgresCdcRls.handle_connect(args)
# Now subscribe to the Postgres Changes
- {:ok, _} = PostgresCdcRls.handle_after_connect(response, postgres_extension, pg_change_params)
+ {:ok, _} = PostgresCdcRls.handle_after_connect(response, postgres_extension, pg_change_params, external_id)
- on_exit(fn -> PostgresCdcRls.handle_stop(external_id, 10_000) end)
+ RealtimeWeb.Endpoint.unsubscribe(Realtime.Syn.PostgresCdc.syn_topic(tenant.external_id))
%{tenant: tenant}
end
- @tag skip: "Flaky test. When logger handle_sasl_reports is enabled this test doesn't break"
- test "Check supervisor crash and respawn", %{tenant: tenant} do
+ test "supervisor crash must not respawn", %{tenant: tenant} do
+ scope = Realtime.Syn.PostgresCdc.scope(tenant.external_id)
+
sup =
Enum.reduce_while(1..30, nil, fn _, acc ->
- :syn.lookup(Extensions.PostgresCdcRls, tenant.external_id)
+ scope
+ |> :syn.lookup(tenant.external_id)
|> case do
:undefined ->
Process.sleep(500)
@@ -107,27 +67,22 @@ defmodule Realtime.Extensions.CdcRlsTest do
assert Process.alive?(sup)
Process.monitor(sup)
- RealtimeWeb.Endpoint.subscribe(PostgresCdcRls.syn_topic(tenant.external_id))
+ RealtimeWeb.Endpoint.subscribe(Realtime.Syn.PostgresCdc.syn_topic(tenant.external_id))
Process.exit(sup, :kill)
- assert_receive {:DOWN, _, :process, ^sup, _reason}, 5000
+ scope_down = Atom.to_string(scope) <> "_down"
- assert_receive %{event: "ready"}, 5000
-
- {sup2, _} = :syn.lookup(Extensions.PostgresCdcRls, tenant.external_id)
+ assert_receive {:DOWN, _, :process, ^sup, _reason}, 5000
+ assert_receive %{event: ^scope_down}
+ refute_receive %{event: "ready"}, 1000
- assert(sup != sup2)
- assert Process.alive?(sup2)
+ :undefined = :syn.lookup(Realtime.Syn.PostgresCdc.scope(tenant.external_id), tenant.external_id)
end
test "Subscription manager updates oids", %{tenant: tenant} do
{subscriber_manager_pid, conn} =
Enum.reduce_while(1..25, nil, fn _, acc ->
case PostgresCdcRls.get_manager_conn(tenant.external_id) do
- nil ->
- Process.sleep(200)
- {:cont, acc}
-
{:error, :wait} ->
Process.sleep(200)
{:cont, acc}
@@ -153,7 +108,10 @@ defmodule Realtime.Extensions.CdcRlsTest do
test "Stop tenant supervisor", %{tenant: tenant} do
sup =
Enum.reduce_while(1..10, nil, fn _, acc ->
- case :syn.lookup(Extensions.PostgresCdcRls, tenant.external_id) do
+ tenant.external_id
+ |> Realtime.Syn.PostgresCdc.scope()
+ |> :syn.lookup(tenant.external_id)
+ |> case do
:undefined ->
Process.sleep(500)
{:cont, acc}
@@ -169,6 +127,39 @@ defmodule Realtime.Extensions.CdcRlsTest do
end
end
+ describe "handle_after_connect/4" do
+ setup do
+ tenant = Containers.checkout_tenant(run_migrations: true)
+ %{tenant: tenant}
+ end
+
+ test "subscription error rate limit", %{tenant: tenant} do
+ %Tenant{extensions: extensions, external_id: external_id} = tenant
+ postgres_extension = PostgresCdc.filter_settings("postgres_cdc_rls", extensions)
+
+ stub(Subscriptions, :create, fn _conn, _publication, _subscription_list, _manager, _caller ->
+ {:error, %DBConnection.ConnectionError{}}
+ end)
+
+ # Now try to subscribe to the Postgres Changes
+ for _x <- 1..6 do
+ assert {:error, "Too many database timeouts"} =
+ PostgresCdcRls.handle_after_connect({:manager_pid, self()}, postgres_extension, %{}, external_id)
+ end
+
+ rate = Realtime.Tenants.subscription_errors_per_second_rate(external_id, 4)
+
+ assert {:ok, %RateCounter{id: {:channel, :subscription_errors, ^external_id}, sum: 6, limit: %{triggered: true}}} =
+ RateCounterHelper.tick!(rate)
+
+ # It won't even be called now
+ reject(&Subscriptions.create/5)
+
+ assert {:error, "Too many database timeouts"} =
+ PostgresCdcRls.handle_after_connect({:manager_pid, self()}, postgres_extension, %{}, external_id)
+ end
+ end
+
describe "Region rebalancing" do
setup do
tenant = Containers.checkout_tenant(run_migrations: true)
@@ -208,36 +199,7 @@ defmodule Realtime.Extensions.CdcRlsTest do
end
describe "integration" do
- setup do
- tenant = Api.get_tenant_by_external_id("dev_tenant")
- PostgresCdcRls.handle_stop(tenant.external_id, 10_000)
-
- {:ok, conn} = Database.connect(tenant, "realtime_test")
-
- Database.transaction(conn, fn db_conn ->
- queries = [
- "drop table if exists public.test",
- "drop publication if exists supabase_realtime_test",
- "create sequence if not exists test_id_seq;",
- """
- create table if not exists "public"."test" (
- "id" int4 not null default nextval('test_id_seq'::regclass),
- "details" text,
- primary key ("id"));
- """,
- "grant all on table public.test to anon;",
- "grant all on table public.test to postgres;",
- "grant all on table public.test to authenticated;",
- "create publication supabase_realtime_test for all tables"
- ]
-
- Enum.each(queries, &Postgrex.query!(db_conn, &1, []))
- end)
-
- RateCounter.stop(tenant.external_id)
-
- %{tenant: tenant, conn: conn}
- end
+ setup [:integration]
test "subscribe inserts", %{tenant: tenant, conn: conn} do
on_exit(fn -> PostgresCdcRls.handle_stop(tenant.external_id, 10_000) end)
@@ -246,40 +208,26 @@ defmodule Realtime.Extensions.CdcRlsTest do
postgres_extension = PostgresCdc.filter_settings("postgres_cdc_rls", extensions)
args = Map.put(postgres_extension, "id", external_id)
- pg_change_params = [
- %{
- id: UUID.uuid1(),
- params: %{"event" => "*", "schema" => "public"},
- channel_pid: self(),
- claims: %{
- "exp" => System.system_time(:second) + 100_000,
- "iat" => 0,
- "ref" => "127.0.0.1",
- "role" => "anon"
- }
- }
- ]
-
- ids =
- Enum.map(pg_change_params, fn %{id: id, params: params} ->
- {UUID.string_to_binary!(id), :erlang.phash2(params)}
- end)
-
- topic = "realtime:test"
- serializer = Phoenix.Socket.V1.JSONSerializer
-
- subscription_metadata = {:subscriber_fastlane, self(), serializer, ids, topic, external_id, true}
- metadata = [metadata: subscription_metadata]
- :ok = PostgresCdc.subscribe(PostgresCdcRls, pg_change_params, external_id, metadata)
+ pg_change_params = pubsub_subscribe(external_id)
# First time it will return nil
PostgresCdcRls.handle_connect(args)
# Wait for it to start
- Process.sleep(3000)
+ assert_receive %{event: "ready"}, 3000
{:ok, response} = PostgresCdcRls.handle_connect(args)
+ assert_receive {
+ :telemetry,
+ [:realtime, :rpc],
+ %{latency: _},
+ %{
+ mechanism: :gen_rpc,
+ success: true
+ }
+ }
+
# Now subscribe to the Postgres Changes
- {:ok, _} = PostgresCdcRls.handle_after_connect(response, postgres_extension, pg_change_params)
+ {:ok, _} = PostgresCdcRls.handle_after_connect(response, postgres_extension, pg_change_params, external_id)
assert %Postgrex.Result{rows: [[1]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
# Insert a record
@@ -287,11 +235,6 @@ defmodule Realtime.Extensions.CdcRlsTest do
assert_receive {:socket_push, :text, data}, 5000
- message =
- data
- |> IO.iodata_to_binary()
- |> Jason.decode!()
-
assert %{
"event" => "postgres_changes",
"payload" => %{
@@ -308,71 +251,105 @@ defmodule Realtime.Extensions.CdcRlsTest do
},
"ref" => nil,
"topic" => "realtime:test"
- } = message
+ } = Jason.decode!(data)
+
+ rate = Realtime.Tenants.db_events_per_second_rate(tenant)
+
+ assert {:ok, %RateCounter{id: {:channel, :db_events, "dev_tenant"}, bucket: bucket}} =
+ RateCounterHelper.tick!(rate)
+
+ assert Enum.sum(bucket) == 1
+
+ assert_receive {
+ :telemetry,
+ [:realtime, :tenants, :payload, :size],
+ %{size: _},
+ %{tenant: "dev_tenant", message_type: :postgres_changes}
+ }
+ end
+
+ test "db events rate limit works", %{tenant: tenant, conn: conn} do
+ on_exit(fn -> PostgresCdcRls.handle_stop(tenant.external_id, 10_000) end)
- # Wait for RateCounter to update
- Process.sleep(2000)
+ %Tenant{extensions: extensions, external_id: external_id} = tenant
+ postgres_extension = PostgresCdc.filter_settings("postgres_cdc_rls", extensions)
+ args = Map.put(postgres_extension, "id", external_id)
+
+ pg_change_params = pubsub_subscribe(external_id)
+
+ # First time it will return nil
+ PostgresCdcRls.handle_connect(args)
+ # Wait for it to start
+ assert_receive %{event: "ready"}, 1000
+ {:ok, response} = PostgresCdcRls.handle_connect(args)
+
+ # Now subscribe to the Postgres Changes
+ {:ok, _} = PostgresCdcRls.handle_after_connect(response, postgres_extension, pg_change_params, external_id)
+ assert %Postgrex.Result{rows: [[1]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
rate = Realtime.Tenants.db_events_per_second_rate(tenant)
- assert {:ok, %RateCounter{id: {:channel, :db_events, "dev_tenant"}, bucket: bucket}} = RateCounter.get(rate)
- assert 1 in bucket
+ log =
+ capture_log(fn ->
+ # artificially increment the counter to reach the limit
+ tenant.external_id
+ |> Realtime.Tenants.db_events_per_second_key()
+ |> Realtime.GenCounter.add(100_000_000)
+
+ RateCounterHelper.tick!(rate)
+ end)
+
+ assert log =~ "MessagePerSecondRateLimitReached: Too many postgres changes messages per second"
+
+ # Insert a record
+ %{rows: [[_id]]} = Postgrex.query!(conn, "insert into test (details) values ('test') returning id", [])
+
+ refute_receive {:socket_push, :text, _}, 5000
+
+ assert {:ok, %RateCounter{id: {:channel, :db_events, "dev_tenant"}, bucket: bucket, limit: %{triggered: true}}} =
+ RateCounterHelper.tick!(rate)
+
+ # Nothing has changed
+ assert Enum.sum(bucket) == 100_000_000
end
+ end
- @aux_mod (quote do
- defmodule Subscriber do
- # Start CDC remotely
- def subscribe(tenant) do
- %Tenant{extensions: extensions, external_id: external_id} = tenant
- postgres_extension = PostgresCdc.filter_settings("postgres_cdc_rls", extensions)
- args = Map.put(postgres_extension, "id", external_id)
-
- # Boot it
- PostgresCdcRls.start(args)
- # Wait for it to start
- Process.sleep(3000)
- {:ok, manager, conn} = PostgresCdcRls.get_manager_conn(external_id)
- {:ok, {manager, conn}}
- end
+ @aux_mod (quote do
+ defmodule Subscriber do
+ # Start CDC remotely
+ def subscribe(tenant) do
+ %Tenant{extensions: extensions, external_id: external_id} = tenant
+ postgres_extension = PostgresCdc.filter_settings("postgres_cdc_rls", extensions)
+ args = Map.put(postgres_extension, "id", external_id)
+
+ RealtimeWeb.Endpoint.subscribe(Realtime.Syn.PostgresCdc.syn_topic(tenant.external_id))
+ # First time it will return nil
+ PostgresCdcRls.start(args)
+ # Wait for it to start
+ assert_receive %{event: "ready"}, 3000
+ {:ok, manager, conn} = PostgresCdcRls.get_manager_conn(external_id)
+ {:ok, {manager, conn}}
end
- end)
+ end
+ end)
+ describe "distributed integration" do
+ setup [:integration]
- test "subscribe inserts distributed mode", %{tenant: tenant, conn: conn} do
+ setup(%{tenant: tenant}) do
{:ok, node} = Clustered.start(@aux_mod)
{:ok, response} = :erpc.call(node, Subscriber, :subscribe, [tenant])
+ %{node: node, response: response}
+ end
+
+ test "subscribe inserts distributed mode", %{tenant: tenant, conn: conn, node: node, response: response} do
%Tenant{extensions: extensions, external_id: external_id} = tenant
postgres_extension = PostgresCdc.filter_settings("postgres_cdc_rls", extensions)
- pg_change_params = [
- %{
- id: UUID.uuid1(),
- params: %{"event" => "*", "schema" => "public"},
- channel_pid: self(),
- claims: %{
- "exp" => System.system_time(:second) + 100_000,
- "iat" => 0,
- "ref" => "127.0.0.1",
- "role" => "anon"
- }
- }
- ]
-
- ids =
- Enum.map(pg_change_params, fn %{id: id, params: params} ->
- {UUID.string_to_binary!(id), :erlang.phash2(params)}
- end)
-
- # Subscribe to the topic as a websocket client
- topic = "realtime:test"
- serializer = Phoenix.Socket.V1.JSONSerializer
-
- subscription_metadata = {:subscriber_fastlane, self(), serializer, ids, topic, external_id, true}
- metadata = [metadata: subscription_metadata]
- :ok = PostgresCdc.subscribe(PostgresCdcRls, pg_change_params, external_id, metadata)
+ pg_change_params = pubsub_subscribe(external_id)
# Now subscribe to the Postgres Changes
- {:ok, _} = PostgresCdcRls.handle_after_connect(response, postgres_extension, pg_change_params)
+ {:ok, _} = PostgresCdcRls.handle_after_connect(response, postgres_extension, pg_change_params, external_id)
assert %Postgrex.Result{rows: [[1]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
# Insert a record
@@ -380,11 +357,6 @@ defmodule Realtime.Extensions.CdcRlsTest do
assert_receive {:socket_push, :text, data}, 5000
- message =
- data
- |> IO.iodata_to_binary()
- |> Jason.decode!()
-
assert %{
"event" => "postgres_changes",
"payload" => %{
@@ -401,17 +373,120 @@ defmodule Realtime.Extensions.CdcRlsTest do
},
"ref" => nil,
"topic" => "realtime:test"
- } = message
+ } = Jason.decode!(data)
- # Wait for RateCounter to update
- Process.sleep(2000)
+ assert_receive {
+ :telemetry,
+ [:realtime, :rpc],
+ %{latency: _},
+ %{
+ mechanism: :gen_rpc,
+ origin_node: _,
+ success: true,
+ target_node: ^node
+ }
+ }
+ end
- rate = Realtime.Tenants.db_events_per_second_rate(tenant)
+ test "subscription error rate limit", %{tenant: tenant, node: node} do
+ %Tenant{extensions: extensions, external_id: external_id} = tenant
+ postgres_extension = PostgresCdc.filter_settings("postgres_cdc_rls", extensions)
- assert {:ok, %RateCounter{id: {:channel, :db_events, "dev_tenant"}, bucket: bucket}} = RateCounter.get(rate)
- assert 1 in bucket
+ pg_change_params = pubsub_subscribe(external_id)
- :erpc.call(node, PostgresCdcRls, :handle_stop, [tenant.external_id, 10_000])
+ # Grab a process that is not alive to cause subscriptions to error out
+ pid = :erpc.call(node, :erlang, :self, [])
+
+ # Now subscribe to the Postgres Changes multiple times to reach the rate limit
+ for _ <- 1..6 do
+ assert {:error, "Too many database timeouts"} =
+ PostgresCdcRls.handle_after_connect({pid, pid}, postgres_extension, pg_change_params, external_id)
+ end
+
+ rate = Realtime.Tenants.subscription_errors_per_second_rate(external_id, 4)
+
+ assert {:ok, %RateCounter{id: {:channel, :subscription_errors, ^external_id}, sum: 6, limit: %{triggered: true}}} =
+ RateCounterHelper.tick!(rate)
+
+ # It won't even be called now
+ reject(&Realtime.GenRpc.call/5)
+
+ assert {:error, "Too many database timeouts"} =
+ PostgresCdcRls.handle_after_connect({pid, pid}, postgres_extension, pg_change_params, external_id)
end
end
+
+ defp integration(_) do
+ tenant = Api.get_tenant_by_external_id("dev_tenant")
+ PostgresCdcRls.handle_stop(tenant.external_id, 10_000)
+
+ {:ok, conn} = Database.connect(tenant, "realtime_test")
+
+ Database.transaction(conn, fn db_conn ->
+ queries = [
+ "drop table if exists public.test",
+ "drop publication if exists supabase_realtime_test",
+ "create sequence if not exists test_id_seq;",
+ """
+ create table if not exists "public"."test" (
+ "id" int4 not null default nextval('test_id_seq'::regclass),
+ "details" text,
+ primary key ("id"));
+ """,
+ "grant all on table public.test to anon;",
+ "grant all on table public.test to postgres;",
+ "grant all on table public.test to authenticated;",
+ "create publication supabase_realtime_test for all tables"
+ ]
+
+ Enum.each(queries, &Postgrex.query!(db_conn, &1, []))
+ end)
+
+ RateCounterHelper.stop(tenant.external_id)
+ on_exit(fn -> RateCounterHelper.stop(tenant.external_id) end)
+
+ on_exit(fn -> :telemetry.detach(__MODULE__) end)
+
+ :telemetry.attach_many(
+ __MODULE__,
+ [[:realtime, :tenants, :payload, :size], [:realtime, :rpc]],
+ &__MODULE__.handle_telemetry/4,
+ pid: self()
+ )
+
+ RealtimeWeb.Endpoint.subscribe(Realtime.Syn.PostgresCdc.syn_topic(tenant.external_id))
+
+ %{tenant: tenant, conn: conn}
+ end
+
+ defp pubsub_subscribe(external_id) do
+ pg_change_params = [
+ %{
+ id: UUID.uuid1(),
+ params: %{"event" => "*", "schema" => "public"},
+ channel_pid: self(),
+ claims: %{
+ "exp" => System.system_time(:second) + 100_000,
+ "iat" => 0,
+ "ref" => "127.0.0.1",
+ "role" => "anon"
+ }
+ }
+ ]
+
+ topic = "realtime:test"
+ serializer = Phoenix.Socket.V1.JSONSerializer
+
+ ids =
+ Enum.map(pg_change_params, fn %{id: id, params: params} ->
+ {UUID.string_to_binary!(id), :erlang.phash2(params)}
+ end)
+
+ subscription_metadata = {:subscriber_fastlane, self(), serializer, ids, topic, true}
+ metadata = [metadata: subscription_metadata]
+ :ok = PostgresCdc.subscribe(PostgresCdcRls, pg_change_params, external_id, metadata)
+ pg_change_params
+ end
+
+ def handle_telemetry(event, measures, metadata, pid: pid), do: send(pid, {:telemetry, event, measures, metadata})
end
diff --git a/test/realtime/extensions/cdc_rls/replication_poller_test.exs b/test/realtime/extensions/cdc_rls/replication_poller_test.exs
index 97d69af62..0fba63a66 100644
--- a/test/realtime/extensions/cdc_rls/replication_poller_test.exs
+++ b/test/realtime/extensions/cdc_rls/replication_poller_test.exs
@@ -1,8 +1,12 @@
-defmodule ReplicationPollerTest do
- use ExUnit.Case, async: false
+defmodule Realtime.Extensions.PostgresCdcRls.ReplicationPollerTest do
+ # Tweaking application env
+ use Realtime.DataCase, async: false
+ use Mimic
+
+ alias Extensions.PostgresCdcRls.MessageDispatcher
alias Extensions.PostgresCdcRls.ReplicationPoller, as: Poller
- import Poller, only: [generate_record: 1]
+ alias Extensions.PostgresCdcRls.Replications
alias Realtime.Adapters.Changes.{
DeletedRecord,
@@ -10,6 +14,284 @@ defmodule ReplicationPollerTest do
UpdatedRecord
}
+ alias Realtime.RateCounter
+
+ alias RealtimeWeb.TenantBroadcaster
+
+ import Poller, only: [generate_record: 1]
+
+ setup :set_mimic_global
+
+ @change_json ~s({"table":"test","type":"INSERT","record":{"id": 34, "details": "test"},"columns":[{"name": "id", "type": "int4"}, {"name": "details", "type": "text"}],"errors":null,"schema":"public","commit_timestamp":"2025-10-13T07:50:28.066Z"})
+
+ describe "poll" do
+ setup do
+ :telemetry.attach(
+ __MODULE__,
+ [:realtime, :replication, :poller, :query, :stop],
+ &__MODULE__.handle_telemetry/4,
+ pid: self()
+ )
+
+ on_exit(fn -> :telemetry.detach(__MODULE__) end)
+
+ tenant = Containers.checkout_tenant(run_migrations: true)
+
+ {:ok, tenant} = Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{"max_events_per_second" => 123})
+
+ subscribers_pids_table = :ets.new(__MODULE__, [:public, :bag])
+ subscribers_nodes_table = :ets.new(__MODULE__, [:public, :set])
+
+ args =
+ hd(tenant.extensions).settings
+ |> Map.put("id", tenant.external_id)
+ |> Map.put("subscribers_pids_table", subscribers_pids_table)
+ |> Map.put("subscribers_nodes_table", subscribers_nodes_table)
+
+ # unless specified it will return empty results
+ empty_results = {:ok, %Postgrex.Result{rows: [], num_rows: 0}}
+ stub(Replications, :list_changes, fn _, _, _, _, _ -> empty_results end)
+
+ %{args: args, tenant: tenant}
+ end
+
+ test "handles no new changes", %{args: args, tenant: tenant} do
+ tenant_id = args["id"]
+ reject(&TenantBroadcaster.pubsub_direct_broadcast/6)
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+ start_link_supervised!({Poller, args})
+
+ assert_receive {
+ :telemetry,
+ [:realtime, :replication, :poller, :query, :stop],
+ %{duration: _},
+ %{tenant: ^tenant_id}
+ },
+ 500
+
+ rate = Realtime.Tenants.db_events_per_second_rate(tenant)
+
+ assert {:ok,
+ %RateCounter{
+ sum: sum,
+ limit: %{
+ value: 123,
+ measurement: :avg,
+ triggered: false
+ }
+ }} = RateCounterHelper.tick!(rate)
+
+ assert sum == 0
+ end
+
+ test "handles new changes with missing ets table", %{args: args, tenant: tenant} do
+ tenant_id = args["id"]
+
+ :ets.delete(args["subscribers_nodes_table"])
+
+ results =
+ build_result([
+ <<71, 36, 83, 212, 168, 9, 17, 240, 165, 186, 118, 202, 193, 157, 232, 187>>,
+ <<251, 188, 190, 118, 168, 119, 17, 240, 188, 87, 118, 202, 193, 157, 232, 187>>
+ ])
+
+ expect(Replications, :list_changes, fn _, _, _, _, _ -> results end)
+ reject(&TenantBroadcaster.pubsub_direct_broadcast/6)
+
+ # Broadcast to the whole cluster due to missing node information
+ expect(TenantBroadcaster, :pubsub_broadcast, fn ^tenant_id,
+ "realtime:postgres:" <> ^tenant_id,
+ {"INSERT", change_json, _sub_ids},
+ MessageDispatcher,
+ :postgres_changes ->
+ assert Jason.decode!(change_json) == Jason.decode!(@change_json)
+ :ok
+ end)
+
+ start_link_supervised!({Poller, args})
+
+ # First poll with changes
+ assert_receive {
+ :telemetry,
+ [:realtime, :replication, :poller, :query, :stop],
+ %{duration: _},
+ %{tenant: ^tenant_id}
+ },
+ 500
+
+ # Second poll without changes
+ assert_receive {
+ :telemetry,
+ [:realtime, :replication, :poller, :query, :stop],
+ %{duration: _},
+ %{tenant: ^tenant_id}
+ },
+ 500
+
+ rate = Realtime.Tenants.db_events_per_second_rate(tenant)
+ assert {:ok, %RateCounter{sum: sum}} = RateCounterHelper.tick!(rate)
+ assert sum == 2
+ end
+
+ test "handles new changes with no subscription nodes", %{args: args, tenant: tenant} do
+ tenant_id = args["id"]
+
+ results =
+ build_result([
+ <<71, 36, 83, 212, 168, 9, 17, 240, 165, 186, 118, 202, 193, 157, 232, 187>>,
+ <<251, 188, 190, 118, 168, 119, 17, 240, 188, 87, 118, 202, 193, 157, 232, 187>>
+ ])
+
+ expect(Replications, :list_changes, fn _, _, _, _, _ -> results end)
+ reject(&TenantBroadcaster.pubsub_direct_broadcast/6)
+
+ # Broadcast to the whole cluster due to missing node information
+ expect(TenantBroadcaster, :pubsub_broadcast, fn ^tenant_id,
+ "realtime:postgres:" <> ^tenant_id,
+ {"INSERT", change_json, _sub_ids},
+ MessageDispatcher,
+ :postgres_changes ->
+ assert Jason.decode!(change_json) == Jason.decode!(@change_json)
+ :ok
+ end)
+
+ start_link_supervised!({Poller, args})
+
+ # First poll with changes
+ assert_receive {
+ :telemetry,
+ [:realtime, :replication, :poller, :query, :stop],
+ %{duration: _},
+ %{tenant: ^tenant_id}
+ },
+ 500
+
+ # Second poll without changes
+ assert_receive {
+ :telemetry,
+ [:realtime, :replication, :poller, :query, :stop],
+ %{duration: _},
+ %{tenant: ^tenant_id}
+ },
+ 500
+
+ rate = Realtime.Tenants.db_events_per_second_rate(tenant)
+ assert {:ok, %RateCounter{sum: sum}} = RateCounterHelper.tick!(rate)
+ assert sum == 2
+ end
+
+ test "handles new changes with missing subscription nodes", %{args: args, tenant: tenant} do
+ tenant_id = args["id"]
+
+ results =
+ build_result([
+ sub1 = <<71, 36, 83, 212, 168, 9, 17, 240, 165, 186, 118, 202, 193, 157, 232, 187>>,
+ <<251, 188, 190, 118, 168, 119, 17, 240, 188, 87, 118, 202, 193, 157, 232, 187>>
+ ])
+
+ # Only one subscription has node information
+ :ets.insert(args["subscribers_nodes_table"], {sub1, node()})
+
+ expect(Replications, :list_changes, fn _, _, _, _, _ -> results end)
+ reject(&TenantBroadcaster.pubsub_direct_broadcast/6)
+
+ # Broadcast to the whole cluster due to missing node information
+ expect(TenantBroadcaster, :pubsub_broadcast, fn ^tenant_id,
+ "realtime:postgres:" <> ^tenant_id,
+ {"INSERT", change_json, _sub_ids},
+ MessageDispatcher,
+ :postgres_changes ->
+ assert Jason.decode!(change_json) == Jason.decode!(@change_json)
+ :ok
+ end)
+
+ start_link_supervised!({Poller, args})
+
+ # First poll with changes
+ assert_receive {
+ :telemetry,
+ [:realtime, :replication, :poller, :query, :stop],
+ %{duration: _},
+ %{tenant: ^tenant_id}
+ },
+ 500
+
+ # Second poll without changes
+ assert_receive {
+ :telemetry,
+ [:realtime, :replication, :poller, :query, :stop],
+ %{duration: _},
+ %{tenant: ^tenant_id}
+ },
+ 500
+
+ rate = Realtime.Tenants.db_events_per_second_rate(tenant)
+ assert {:ok, %RateCounter{sum: sum}} = RateCounterHelper.tick!(rate)
+ assert sum == 2
+ end
+
+ test "handles new changes with subscription nodes information", %{args: args, tenant: tenant} do
+ tenant_id = args["id"]
+
+ results =
+ build_result([
+ sub1 = <<71, 36, 83, 212, 168, 9, 17, 240, 165, 186, 118, 202, 193, 157, 232, 187>>,
+ sub2 = <<251, 188, 190, 118, 168, 119, 17, 240, 188, 87, 118, 202, 193, 157, 232, 187>>,
+ sub3 = <<49, 59, 209, 112, 173, 77, 17, 240, 191, 41, 118, 202, 193, 157, 232, 187>>
+ ])
+
+ # All subscriptions have node information
+ :ets.insert(args["subscribers_nodes_table"], {sub1, node()})
+ :ets.insert(args["subscribers_nodes_table"], {sub2, :"someothernode@127.0.0.1"})
+ :ets.insert(args["subscribers_nodes_table"], {sub3, node()})
+
+ expect(Replications, :list_changes, fn _, _, _, _, _ -> results end)
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+
+ topic = "realtime:postgres:" <> tenant_id
+
+ # Broadcast to the exact nodes only
+ expect(TenantBroadcaster, :pubsub_direct_broadcast, 2, fn
+ _node, ^tenant_id, ^topic, {"INSERT", change_json, _sub_ids}, MessageDispatcher, :postgres_changes ->
+ assert Jason.decode!(change_json) == Jason.decode!(@change_json)
+ :ok
+ end)
+
+ start_link_supervised!({Poller, args})
+
+ # First poll with changes
+ assert_receive {
+ :telemetry,
+ [:realtime, :replication, :poller, :query, :stop],
+ %{duration: _},
+ %{tenant: ^tenant_id}
+ },
+ 500
+
+ # Second poll without changes
+ assert_receive {
+ :telemetry,
+ [:realtime, :replication, :poller, :query, :stop],
+ %{duration: _},
+ %{tenant: ^tenant_id}
+ },
+ 500
+
+ calls = calls(TenantBroadcaster, :pubsub_direct_broadcast, 6)
+
+ assert Enum.count(calls) == 2
+
+ node_subs = Enum.map(calls, fn [node, _, _, {"INSERT", _change_json, sub_ids}, _, _] -> {node, sub_ids} end)
+
+ assert {node(), MapSet.new([sub1, sub3])} in node_subs
+ assert {:"someothernode@127.0.0.1", MapSet.new([sub2])} in node_subs
+
+ rate = Realtime.Tenants.db_events_per_second_rate(tenant)
+ assert {:ok, %RateCounter{sum: sum}} = RateCounterHelper.tick!(rate)
+ assert sum == 3
+ end
+ end
+
@columns [
%{"name" => "id", "type" => "int8"},
%{"name" => "details", "type" => "text"},
@@ -19,272 +301,277 @@ defmodule ReplicationPollerTest do
@ts "2021-11-05T17:20:51.52406+00:00"
@subscription_id "417e76fd-9bc5-4b3e-bd5d-a031389c4a6b"
+ @subscription_ids MapSet.new(["417e76fd-9bc5-4b3e-bd5d-a031389c4a6b"])
+
+ @old_record %{"id" => 12}
+ @record %{"details" => "test", "id" => 12, "user_id" => 1}
describe "generate_record/1" do
test "INSERT" do
- record = [
- {"wal",
- %{
- "columns" => @columns,
- "commit_timestamp" => @ts,
- "record" => %{"details" => "test", "id" => 12, "user_id" => 1},
- "schema" => "public",
- "table" => "todos",
- "type" => "INSERT"
- }},
- {"is_rls_enabled", false},
+ wal_record = [
+ {"type", "INSERT"},
+ {"schema", "public"},
+ {"table", "todos"},
+ {"columns", Jason.encode!(@columns)},
+ {"record", Jason.encode!(@record)},
+ {"old_record", nil},
+ {"commit_timestamp", @ts},
{"subscription_ids", [@subscription_id]},
{"errors", []}
]
- expected = %NewRecord{
- columns: @columns,
- commit_timestamp: @ts,
- schema: "public",
- table: "todos",
- type: "INSERT",
- subscription_ids: MapSet.new([@subscription_id]),
- record: %{"details" => "test", "id" => 12, "user_id" => 1},
- errors: nil
- }
-
- assert expected == generate_record(record)
+ assert %NewRecord{
+ columns: columns,
+ commit_timestamp: @ts,
+ schema: "public",
+ table: "todos",
+ type: "INSERT",
+ subscription_ids: @subscription_ids,
+ record: record,
+ errors: nil
+ } = generate_record(wal_record)
+
+ # Encode then decode to get rid of the fragment
+ assert record |> Jason.encode!() |> Jason.decode!() == @record
+ assert columns |> Jason.encode!() |> Jason.decode!() == @columns
end
test "UPDATE" do
- record = [
- {"wal",
- %{
- "columns" => @columns,
- "commit_timestamp" => @ts,
- "old_record" => %{"id" => 12},
- "record" => %{"details" => "test1", "id" => 12, "user_id" => 1},
- "schema" => "public",
- "table" => "todos",
- "type" => "UPDATE"
- }},
- {"is_rls_enabled", false},
+ wal_record = [
+ {"type", "UPDATE"},
+ {"schema", "public"},
+ {"table", "todos"},
+ {"columns", Jason.encode!(@columns)},
+ {"record", Jason.encode!(@record)},
+ {"old_record", Jason.encode!(@old_record)},
+ {"commit_timestamp", @ts},
{"subscription_ids", [@subscription_id]},
{"errors", []}
]
- expected = %UpdatedRecord{
- columns: @columns,
- commit_timestamp: @ts,
- schema: "public",
- table: "todos",
- type: "UPDATE",
- subscription_ids: MapSet.new([@subscription_id]),
- old_record: %{"id" => 12},
- record: %{"details" => "test1", "id" => 12, "user_id" => 1},
- errors: nil
- }
-
- assert expected == generate_record(record)
+ assert %UpdatedRecord{
+ columns: columns,
+ commit_timestamp: @ts,
+ schema: "public",
+ table: "todos",
+ type: "UPDATE",
+ subscription_ids: @subscription_ids,
+ record: record,
+ old_record: old_record,
+ errors: nil
+ } = generate_record(wal_record)
+
+ # Encode then decode to get rid of the fragment
+ assert record |> Jason.encode!() |> Jason.decode!() == @record
+ assert old_record |> Jason.encode!() |> Jason.decode!() == @old_record
+ assert columns |> Jason.encode!() |> Jason.decode!() == @columns
end
test "DELETE" do
- record = [
- {"wal",
- %{
- "columns" => @columns,
- "commit_timestamp" => @ts,
- "old_record" => %{"id" => 15},
- "schema" => "public",
- "table" => "todos",
- "type" => "DELETE"
- }},
- {"is_rls_enabled", false},
+ wal_record = [
+ {"type", "DELETE"},
+ {"schema", "public"},
+ {"table", "todos"},
+ {"columns", Jason.encode!(@columns)},
+ {"record", nil},
+ {"old_record", Jason.encode!(@old_record)},
+ {"commit_timestamp", @ts},
{"subscription_ids", [@subscription_id]},
{"errors", []}
]
- expected = %DeletedRecord{
- columns: @columns,
- commit_timestamp: @ts,
- schema: "public",
- table: "todos",
- type: "DELETE",
- subscription_ids: MapSet.new([@subscription_id]),
- old_record: %{"id" => 15},
- errors: nil
- }
-
- assert expected == generate_record(record)
+ assert %DeletedRecord{
+ columns: columns,
+ commit_timestamp: @ts,
+ schema: "public",
+ table: "todos",
+ type: "DELETE",
+ subscription_ids: @subscription_ids,
+ old_record: old_record,
+ errors: nil
+ } = generate_record(wal_record)
+
+ # Encode then decode to get rid of the fragment
+ assert old_record |> Jason.encode!() |> Jason.decode!() == @old_record
+ assert columns |> Jason.encode!() |> Jason.decode!() == @columns
end
test "INSERT, large payload error present" do
- record = [
- {"wal",
- %{
- "columns" => @columns,
- "commit_timestamp" => @ts,
- "record" => %{"details" => "test", "id" => 12, "user_id" => 1},
- "schema" => "public",
- "table" => "todos",
- "type" => "INSERT"
- }},
- {"is_rls_enabled", false},
+ wal_record = [
+ {"type", "INSERT"},
+ {"schema", "public"},
+ {"table", "todos"},
+ {"columns", Jason.encode!(@columns)},
+ {"record", Jason.encode!(@record)},
+ {"old_record", nil},
+ {"commit_timestamp", @ts},
{"subscription_ids", [@subscription_id]},
{"errors", ["Error 413: Payload Too Large"]}
]
- expected = %NewRecord{
- columns: @columns,
- commit_timestamp: @ts,
- schema: "public",
- table: "todos",
- type: "INSERT",
- subscription_ids: MapSet.new([@subscription_id]),
- record: %{"details" => "test", "id" => 12, "user_id" => 1},
- errors: ["Error 413: Payload Too Large"]
- }
-
- assert expected == generate_record(record)
+ assert %NewRecord{
+ columns: columns,
+ commit_timestamp: @ts,
+ schema: "public",
+ table: "todos",
+ type: "INSERT",
+ subscription_ids: @subscription_ids,
+ record: record,
+ errors: ["Error 413: Payload Too Large"]
+ } = generate_record(wal_record)
+
+ # Encode then decode to get rid of the fragment
+ assert record |> Jason.encode!() |> Jason.decode!() == @record
+ assert columns |> Jason.encode!() |> Jason.decode!() == @columns
end
test "INSERT, other errors present" do
- record = [
- {"wal",
- %{
- "schema" => "public",
- "table" => "todos",
- "type" => "INSERT"
- }},
- {"is_rls_enabled", false},
+ wal_record = [
+ {"type", "INSERT"},
+ {"schema", "public"},
+ {"table", "todos"},
+ {"columns", Jason.encode!(@columns)},
+ {"record", Jason.encode!(@record)},
+ {"old_record", nil},
+ {"commit_timestamp", @ts},
{"subscription_ids", [@subscription_id]},
{"errors", ["Error..."]}
]
- expected = %NewRecord{
- columns: [],
- commit_timestamp: nil,
- schema: "public",
- table: "todos",
- type: "INSERT",
- subscription_ids: MapSet.new([@subscription_id]),
- record: %{},
- errors: ["Error..."]
- }
-
- assert expected == generate_record(record)
+ assert %NewRecord{
+ columns: columns,
+ commit_timestamp: @ts,
+ schema: "public",
+ table: "todos",
+ type: "INSERT",
+ subscription_ids: @subscription_ids,
+ record: record,
+ errors: ["Error..."]
+ } = generate_record(wal_record)
+
+ # Encode then decode to get rid of the fragment
+ assert record |> Jason.encode!() |> Jason.decode!() == @record
+ assert columns |> Jason.encode!() |> Jason.decode!() == @columns
end
test "UPDATE, large payload error present" do
- record = [
- {"wal",
- %{
- "columns" => @columns,
- "commit_timestamp" => @ts,
- "old_record" => %{"details" => "prev test", "id" => 12, "user_id" => 1},
- "record" => %{"details" => "test", "id" => 12, "user_id" => 1},
- "schema" => "public",
- "table" => "todos",
- "type" => "UPDATE"
- }},
- {"is_rls_enabled", false},
+ wal_record = [
+ {"type", "UPDATE"},
+ {"schema", "public"},
+ {"table", "todos"},
+ {"columns", Jason.encode!(@columns)},
+ {"record", Jason.encode!(@record)},
+ {"old_record", Jason.encode!(@old_record)},
+ {"commit_timestamp", @ts},
{"subscription_ids", [@subscription_id]},
{"errors", ["Error 413: Payload Too Large"]}
]
- expected = %UpdatedRecord{
- columns: @columns,
- commit_timestamp: @ts,
- schema: "public",
- table: "todos",
- type: "UPDATE",
- subscription_ids: MapSet.new([@subscription_id]),
- old_record: %{"details" => "prev test", "id" => 12, "user_id" => 1},
- record: %{"details" => "test", "id" => 12, "user_id" => 1},
- errors: ["Error 413: Payload Too Large"]
- }
-
- assert expected == generate_record(record)
+ assert %UpdatedRecord{
+ columns: columns,
+ commit_timestamp: @ts,
+ schema: "public",
+ table: "todos",
+ type: "UPDATE",
+ subscription_ids: @subscription_ids,
+ record: record,
+ old_record: old_record,
+ errors: ["Error 413: Payload Too Large"]
+ } = generate_record(wal_record)
+
+ # Encode then decode to get rid of the fragment
+ assert record |> Jason.encode!() |> Jason.decode!() == @record
+ assert old_record |> Jason.encode!() |> Jason.decode!() == @old_record
+ assert columns |> Jason.encode!() |> Jason.decode!() == @columns
end
test "UPDATE, other errors present" do
- record = [
- {"wal",
- %{
- "schema" => "public",
- "table" => "todos",
- "type" => "UPDATE"
- }},
- {"is_rls_enabled", false},
+ wal_record = [
+ {"type", "UPDATE"},
+ {"schema", "public"},
+ {"table", "todos"},
+ {"columns", Jason.encode!(@columns)},
+ {"record", Jason.encode!(@record)},
+ {"old_record", Jason.encode!(@old_record)},
+ {"commit_timestamp", @ts},
{"subscription_ids", [@subscription_id]},
{"errors", ["Error..."]}
]
- expected = %UpdatedRecord{
- columns: [],
- commit_timestamp: nil,
- schema: "public",
- table: "todos",
- type: "UPDATE",
- subscription_ids: MapSet.new([@subscription_id]),
- old_record: %{},
- record: %{},
- errors: ["Error..."]
- }
-
- assert expected == generate_record(record)
+ assert %UpdatedRecord{
+ columns: columns,
+ commit_timestamp: @ts,
+ schema: "public",
+ table: "todos",
+ type: "UPDATE",
+ subscription_ids: @subscription_ids,
+ record: record,
+ old_record: old_record,
+ errors: ["Error..."]
+ } = generate_record(wal_record)
+
+ # Encode then decode to get rid of the fragment
+ assert record |> Jason.encode!() |> Jason.decode!() == @record
+ assert old_record |> Jason.encode!() |> Jason.decode!() == @old_record
+ assert columns |> Jason.encode!() |> Jason.decode!() == @columns
end
test "DELETE, large payload error present" do
- record = [
- {"wal",
- %{
- "columns" => @columns,
- "commit_timestamp" => @ts,
- "old_record" => %{"details" => "test", "id" => 12, "user_id" => 1},
- "schema" => "public",
- "table" => "todos",
- "type" => "DELETE"
- }},
- {"is_rls_enabled", false},
+ wal_record = [
+ {"type", "DELETE"},
+ {"schema", "public"},
+ {"table", "todos"},
+ {"columns", Jason.encode!(@columns)},
+ {"record", nil},
+ {"old_record", Jason.encode!(@old_record)},
+ {"commit_timestamp", @ts},
{"subscription_ids", [@subscription_id]},
{"errors", ["Error 413: Payload Too Large"]}
]
- expected = %DeletedRecord{
- columns: @columns,
- commit_timestamp: @ts,
- schema: "public",
- table: "todos",
- type: "DELETE",
- subscription_ids: MapSet.new([@subscription_id]),
- old_record: %{"details" => "test", "id" => 12, "user_id" => 1},
- errors: ["Error 413: Payload Too Large"]
- }
-
- assert expected == generate_record(record)
+ assert %DeletedRecord{
+ columns: columns,
+ commit_timestamp: @ts,
+ schema: "public",
+ table: "todos",
+ type: "DELETE",
+ subscription_ids: @subscription_ids,
+ old_record: old_record,
+ errors: ["Error 413: Payload Too Large"]
+ } = generate_record(wal_record)
+
+ # Encode then decode to get rid of the fragment
+ assert old_record |> Jason.encode!() |> Jason.decode!() == @old_record
+ assert columns |> Jason.encode!() |> Jason.decode!() == @columns
end
test "DELETE, other errors present" do
- record = [
- {"wal",
- %{
- "schema" => "public",
- "table" => "todos",
- "type" => "DELETE"
- }},
- {"is_rls_enabled", false},
+ wal_record = [
+ {"type", "DELETE"},
+ {"schema", "public"},
+ {"table", "todos"},
+ {"columns", Jason.encode!(@columns)},
+ {"record", nil},
+ {"old_record", Jason.encode!(@old_record)},
+ {"commit_timestamp", @ts},
{"subscription_ids", [@subscription_id]},
{"errors", ["Error..."]}
]
- expected = %DeletedRecord{
- columns: [],
- commit_timestamp: nil,
- schema: "public",
- table: "todos",
- type: "DELETE",
- subscription_ids: MapSet.new([@subscription_id]),
- old_record: %{},
- errors: ["Error..."]
- }
-
- assert expected == generate_record(record)
+ assert %DeletedRecord{
+ columns: columns,
+ commit_timestamp: @ts,
+ schema: "public",
+ table: "todos",
+ type: "DELETE",
+ subscription_ids: @subscription_ids,
+ old_record: old_record,
+ errors: ["Error..."]
+ } = generate_record(wal_record)
+
+ # Encode then decode to get rid of the fragment
+ assert old_record |> Jason.encode!() |> Jason.decode!() == @old_record
+ assert columns |> Jason.encode!() |> Jason.decode!() == @columns
end
end
@@ -305,4 +592,40 @@ defmodule ReplicationPollerTest do
assert Poller.slot_name_suffix() == ""
end
end
+
+ def handle_telemetry(event, measures, metadata, pid: pid), do: send(pid, {:telemetry, event, measures, metadata})
+
+ defp build_result(subscription_ids) do
+ {:ok,
+ %Postgrex.Result{
+ command: :select,
+ columns: [
+ "type",
+ "schema",
+ "table",
+ "columns",
+ "record",
+ "old_record",
+ "commit_timestamp",
+ "subscription_ids",
+ "errors"
+ ],
+ rows: [
+ [
+ "INSERT",
+ "public",
+ "test",
+ "[{\"name\": \"id\", \"type\": \"int4\"}, {\"name\": \"details\", \"type\": \"text\"}]",
+ "{\"id\": 34, \"details\": \"test\"}",
+ nil,
+ "2025-10-13T07:50:28.066Z",
+ subscription_ids,
+ []
+ ]
+ ],
+ num_rows: 1,
+ connection_id: 123,
+ messages: []
+ }}
+ end
end
diff --git a/test/realtime/extensions/cdc_rls/subscription_manager_test.exs b/test/realtime/extensions/cdc_rls/subscription_manager_test.exs
new file mode 100644
index 000000000..3fbde34b5
--- /dev/null
+++ b/test/realtime/extensions/cdc_rls/subscription_manager_test.exs
@@ -0,0 +1,160 @@
+defmodule Realtime.Extensions.CdcRls.SubscriptionManagerTest do
+ use Realtime.DataCase, async: true
+
+ alias Extensions.PostgresCdcRls
+ alias Extensions.PostgresCdcRls.SubscriptionManager
+ alias Extensions.PostgresCdcRls.Subscriptions
+
+ setup do
+ tenant = Containers.checkout_tenant(run_migrations: true)
+
+ subscribers_pids_table = :ets.new(__MODULE__, [:public, :bag])
+ subscribers_nodes_table = :ets.new(__MODULE__, [:public, :set])
+
+ args =
+ hd(tenant.extensions).settings
+ |> Map.put("id", tenant.external_id)
+ |> Map.put("subscribers_pids_table", subscribers_pids_table)
+ |> Map.put("subscribers_nodes_table", subscribers_nodes_table)
+
+ # register this process with syn as if this was the WorkersSupervisor
+
+ scope = Realtime.Syn.PostgresCdc.scope(tenant.external_id)
+ :syn.register(scope, tenant.external_id, self(), %{region: "us-east-1", manager: nil, subs_pool: nil})
+
+ {:ok, pid} = SubscriptionManager.start_link(Map.put(args, "id", tenant.external_id))
+ # This serves so that we know that handle_continue has finished
+ :sys.get_state(pid)
+ %{args: args, pid: pid}
+ end
+
+ describe "subscription" do
+ test "subscription", %{pid: pid, args: args} do
+ {:ok, ^pid, conn} = PostgresCdcRls.get_manager_conn(args["id"])
+ {uuid, bin_uuid, pg_change_params} = pg_change_params()
+
+ subscriber = self()
+
+ assert {:ok, [%Postgrex.Result{command: :insert, columns: ["id"], rows: [[1]], num_rows: 1}]} =
+ Subscriptions.create(conn, args["publication"], [pg_change_params], pid, subscriber)
+
+ # Wait for subscription manager to process the :subscribed message
+ :sys.get_state(pid)
+
+ node = node()
+
+ assert [{^subscriber, ^uuid, _ref, ^node}] = :ets.tab2list(args["subscribers_pids_table"])
+
+ assert :ets.tab2list(args["subscribers_nodes_table"]) == [{bin_uuid, node}]
+ end
+
+ test "subscriber died", %{pid: pid, args: args} do
+ {:ok, ^pid, conn} = PostgresCdcRls.get_manager_conn(args["id"])
+ self = self()
+
+ subscriber =
+ spawn(fn ->
+ receive do
+ :stop -> :ok
+ end
+ end)
+
+ {uuid1, bin_uuid1, pg_change_params1} = pg_change_params()
+ {uuid2, bin_uuid2, pg_change_params2} = pg_change_params()
+ {uuid3, bin_uuid3, pg_change_params3} = pg_change_params()
+
+ assert {:ok, _} =
+ Subscriptions.create(conn, args["publication"], [pg_change_params1, pg_change_params2], pid, subscriber)
+
+ assert {:ok, _} = Subscriptions.create(conn, args["publication"], [pg_change_params3], pid, self())
+
+ # Wait for subscription manager to process the :subscribed message
+ :sys.get_state(pid)
+
+ node = node()
+
+ assert :ets.info(args["subscribers_pids_table"], :size) == 3
+
+ assert [{^subscriber, ^uuid1, _, ^node}, {^subscriber, ^uuid2, _, ^node}] =
+ :ets.lookup(args["subscribers_pids_table"], subscriber)
+
+ assert [{^self, ^uuid3, _ref, ^node}] = :ets.lookup(args["subscribers_pids_table"], self)
+
+ assert :ets.info(args["subscribers_nodes_table"], :size) == 3
+ assert [{^bin_uuid1, ^node}] = :ets.lookup(args["subscribers_nodes_table"], bin_uuid1)
+ assert [{^bin_uuid2, ^node}] = :ets.lookup(args["subscribers_nodes_table"], bin_uuid2)
+ assert [{^bin_uuid3, ^node}] = :ets.lookup(args["subscribers_nodes_table"], bin_uuid3)
+
+ send(subscriber, :stop)
+ # Wait for subscription manager to receive the :DOWN message
+ Process.sleep(200)
+
+ # Only the subscription we have not stopped should remain
+
+ assert [{^self, ^uuid3, _ref, ^node}] = :ets.tab2list(args["subscribers_pids_table"])
+ assert [{^bin_uuid3, ^node}] = :ets.tab2list(args["subscribers_nodes_table"])
+ end
+ end
+
+ describe "subscription deletion" do
+ test "subscription is deleted when process goes away", %{pid: pid, args: args} do
+ {:ok, ^pid, conn} = PostgresCdcRls.get_manager_conn(args["id"])
+ {_uuid, _bin_uuid, pg_change_params} = pg_change_params()
+
+ subscriber =
+ spawn(fn ->
+ receive do
+ :stop -> :ok
+ end
+ end)
+
+ assert {:ok, [%Postgrex.Result{command: :insert, columns: ["id"], rows: [[1]], num_rows: 1}]} =
+ Subscriptions.create(conn, args["publication"], [pg_change_params], pid, subscriber)
+
+ # Wait for subscription manager to process the :subscribed message
+ :sys.get_state(pid)
+
+ assert :ets.info(args["subscribers_pids_table"], :size) == 1
+ assert :ets.info(args["subscribers_nodes_table"], :size) == 1
+
+ assert %Postgrex.Result{rows: [[1]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+
+ send(subscriber, :stop)
+ # Wait for subscription manager to receive the :DOWN message
+ Process.sleep(200)
+
+ assert :ets.info(args["subscribers_pids_table"], :size) == 0
+ assert :ets.info(args["subscribers_nodes_table"], :size) == 0
+
+ # Force check delete queue on manager
+ send(pid, :check_delete_queue)
+ Process.sleep(200)
+ end
+ end
+
+ describe "check no users" do
+ test "exit is sent to manager", %{pid: pid} do
+ :sys.replace_state(pid, fn state -> %{state | no_users_ts: 0} end)
+
+ send(pid, :check_no_users)
+
+ assert_receive {:system, {^pid, _}, {:terminate, :shutdown}}
+ end
+ end
+
+ defp pg_change_params do
+ uuid = UUID.uuid1()
+
+ pg_change_params = %{
+ id: uuid,
+ subscription_params: {"public", "*", []},
+ claims: %{
+ "exp" => System.system_time(:second) + 100_000,
+ "iat" => 0,
+ "role" => "anon"
+ }
+ }
+
+ {uuid, UUID.string_to_binary!(uuid), pg_change_params}
+ end
+end
diff --git a/test/realtime/extensions/cdc_rls/subscriptions_checker_distributed_test.exs b/test/realtime/extensions/cdc_rls/subscriptions_checker_distributed_test.exs
new file mode 100644
index 000000000..3b459e6c1
--- /dev/null
+++ b/test/realtime/extensions/cdc_rls/subscriptions_checker_distributed_test.exs
@@ -0,0 +1,66 @@
+defmodule Realtime.Extensions.CdcRls.SubscriptionsCheckerDistributedTest do
+ # Usage of Clustered
+ use ExUnit.Case, async: false
+ import ExUnit.CaptureLog
+
+ alias Extensions.PostgresCdcRls.SubscriptionsChecker, as: Checker
+
+ setup do
+ {:ok, peer, remote_node} = Clustered.start_disconnected()
+ true = Node.connect(remote_node)
+ {:ok, peer: peer, remote_node: remote_node}
+ end
+
+ describe "not_alive_pids_dist/1" do
+ test "returns empty list for all alive PIDs", %{remote_node: remote_node} do
+ assert Checker.not_alive_pids_dist(%{}) == []
+
+ pid1 = spawn(fn -> Process.sleep(5000) end)
+ pid2 = spawn(fn -> Process.sleep(5000) end)
+ pid3 = spawn(fn -> Process.sleep(5000) end)
+ pid4 = Node.spawn(remote_node, Process, :sleep, [5000])
+
+ assert Checker.not_alive_pids_dist(%{node() => MapSet.new([pid1, pid2, pid3]), remote_node => MapSet.new([pid4])}) ==
+ []
+ end
+
+ test "returns list of dead PIDs", %{remote_node: remote_node} do
+ pid1 = spawn(fn -> Process.sleep(5000) end)
+ pid2 = spawn(fn -> Process.sleep(5000) end)
+ pid3 = spawn(fn -> Process.sleep(5000) end)
+ pid4 = Node.spawn(remote_node, Process, :sleep, [5000])
+ pid5 = Node.spawn(remote_node, Process, :sleep, [5000])
+
+ Process.exit(pid2, :kill)
+ Process.exit(pid5, :kill)
+
+ assert Checker.not_alive_pids_dist(%{
+ node() => MapSet.new([pid1, pid2, pid3]),
+ remote_node => MapSet.new([pid4, pid5])
+ }) == [pid2, pid5]
+ end
+
+ test "handles rpc error", %{remote_node: remote_node, peer: peer} do
+ pid1 = spawn(fn -> Process.sleep(5000) end)
+ pid2 = spawn(fn -> Process.sleep(5000) end)
+ pid3 = spawn(fn -> Process.sleep(5000) end)
+ pid4 = Node.spawn(remote_node, Process, :sleep, [5000])
+ pid5 = Node.spawn(remote_node, Process, :sleep, [5000])
+
+ Process.exit(pid2, :kill)
+
+ # Stop the other node
+ :peer.stop(peer)
+
+ log =
+ capture_log(fn ->
+ assert Checker.not_alive_pids_dist(%{
+ node() => MapSet.new([pid1, pid2, pid3]),
+ remote_node => MapSet.new([pid4, pid5])
+ }) == [pid2]
+ end)
+
+ assert log =~ "UnableToCheckProcessesOnRemoteNode"
+ end
+ end
+end
diff --git a/test/realtime/extensions/cdc_rls/subscriptions_checker_test.exs b/test/realtime/extensions/cdc_rls/subscriptions_checker_test.exs
index bfbb4bd7a..db39678ac 100644
--- a/test/realtime/extensions/cdc_rls/subscriptions_checker_test.exs
+++ b/test/realtime/extensions/cdc_rls/subscriptions_checker_test.exs
@@ -1,9 +1,10 @@
-defmodule SubscriptionsCheckerTest do
+defmodule Realtime.Extensions.PostgresCdcRls.SubscriptionsCheckerTest do
use ExUnit.Case, async: true
alias Extensions.PostgresCdcRls.SubscriptionsChecker, as: Checker
+ import UUID, only: [uuid1: 0, string_to_binary!: 1]
test "subscribers_by_node/1" do
- tid = :ets.new(:table, [:public, :bag])
+ subscribers_pids_table = :ets.new(:table, [:public, :bag])
test_data = [
{:pid1, "id1", :ref, :node1},
@@ -11,9 +12,9 @@ defmodule SubscriptionsCheckerTest do
{:pid2, "id2", :ref, :node2}
]
- :ets.insert(tid, test_data)
+ :ets.insert(subscribers_pids_table, test_data)
- assert Checker.subscribers_by_node(tid) == %{
+ assert Checker.subscribers_by_node(subscribers_pids_table) == %{
node1: MapSet.new([:pid1]),
node2: MapSet.new([:pid2])
}
@@ -40,41 +41,66 @@ defmodule SubscriptionsCheckerTest do
end
end
- describe "pop_not_alive_pids/2" do
+ describe "pop_not_alive_pids/4" do
test "one subscription per channel" do
- tid = :ets.new(:table, [:public, :bag])
+ subscribers_pids_table = :ets.new(:table, [:public, :bag])
+ subscribers_nodes_table = :ets.new(:table, [:public, :set])
- uuid1 = UUID.uuid1()
- uuid2 = UUID.uuid1()
+ uuid1 = uuid1()
+ uuid2 = uuid1()
+ uuid3 = uuid1()
- test_data = [
+ pids_test_data = [
{:pid1, uuid1, :ref, :node1},
{:pid1, uuid2, :ref, :node1},
- {:pid2, "uuid", :ref, :node2}
+ {:pid2, uuid3, :ref, :node2}
]
- :ets.insert(tid, test_data)
+ :ets.insert(subscribers_pids_table, pids_test_data)
+
+ nodes_test_data = [
+ {string_to_binary!(uuid1), :node1},
+ {string_to_binary!(uuid2), :node1},
+ {string_to_binary!(uuid3), :node2}
+ ]
- not_alive = Enum.sort(Checker.pop_not_alive_pids([:pid1], tid, "id"))
- expected = Enum.sort([UUID.string_to_binary!(uuid1), UUID.string_to_binary!(uuid2)])
+ :ets.insert(subscribers_nodes_table, nodes_test_data)
+
+ not_alive = Enum.sort(Checker.pop_not_alive_pids([:pid1], subscribers_pids_table, subscribers_nodes_table, "id"))
+ expected = Enum.sort([string_to_binary!(uuid1), string_to_binary!(uuid2)])
assert not_alive == expected
- assert :ets.tab2list(tid) == [{:pid2, "uuid", :ref, :node2}]
+ assert :ets.tab2list(subscribers_pids_table) == [{:pid2, uuid3, :ref, :node2}]
+ assert :ets.tab2list(subscribers_nodes_table) == [{string_to_binary!(uuid3), :node2}]
end
test "two subscriptions per channel" do
- tid = :ets.new(:table, [:public, :bag])
+ subscribers_pids_table = :ets.new(:table, [:public, :bag])
+ subscribers_nodes_table = :ets.new(:table, [:public, :set])
- uuid1 = UUID.uuid1()
+ uuid1 = uuid1()
+ uuid2 = uuid1()
test_data = [
{:pid1, uuid1, :ref, :node1},
- {:pid2, "uuid", :ref, :node2}
+ {:pid2, uuid2, :ref, :node2}
]
- :ets.insert(tid, test_data)
- assert Checker.pop_not_alive_pids([:pid1], tid, "id") == [UUID.string_to_binary!(uuid1)]
- assert :ets.tab2list(tid) == [{:pid2, "uuid", :ref, :node2}]
+ :ets.insert(subscribers_pids_table, test_data)
+
+ nodes_test_data = [
+ {string_to_binary!(uuid1), :node1},
+ {string_to_binary!(uuid2), :node2}
+ ]
+
+ :ets.insert(subscribers_nodes_table, nodes_test_data)
+
+ assert Checker.pop_not_alive_pids([:pid1], subscribers_pids_table, subscribers_nodes_table, "id") == [
+ string_to_binary!(uuid1)
+ ]
+
+ assert :ets.tab2list(subscribers_pids_table) == [{:pid2, uuid2, :ref, :node2}]
+ assert :ets.tab2list(subscribers_nodes_table) == [{string_to_binary!(uuid2), :node2}]
end
end
end
diff --git a/test/realtime/extensions/cdc_rls/subscriptions_test.exs b/test/realtime/extensions/cdc_rls/subscriptions_test.exs
index cb53b72ed..975313861 100644
--- a/test/realtime/extensions/cdc_rls/subscriptions_test.exs
+++ b/test/realtime/extensions/cdc_rls/subscriptions_test.exs
@@ -1,13 +1,13 @@
-defmodule Realtime.Extensionsubscriptions.CdcRlsSubscriptionsTest do
+defmodule Realtime.Extensions.PostgresCdcRls.SubscriptionsTest do
use RealtimeWeb.ChannelCase, async: true
- doctest Extensions.PostgresCdcRls.Subscriptions
+
+ doctest Extensions.PostgresCdcRls.Subscriptions, import: true
alias Extensions.PostgresCdcRls.Subscriptions
alias Realtime.Database
- alias Realtime.Tenants
setup do
- tenant = Tenants.get_tenant_by_external_id("dev_tenant")
+ tenant = Containers.checkout_tenant(run_migrations: true)
{:ok, conn} =
tenant
@@ -16,106 +16,194 @@ defmodule Realtime.Extensionsubscriptions.CdcRlsSubscriptionsTest do
|> Keyword.new()
|> Postgrex.start_link()
+ Subscriptions.delete_all(conn)
+ assert %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+
%{conn: conn}
end
- test "create", %{conn: conn} do
- Subscriptions.delete_all(conn)
+ describe "create/5" do
+ test "create all tables & all events", %{conn: conn} do
+ {:ok, subscription_params} = Subscriptions.parse_subscription_params(%{"event" => "*", "schema" => "public"})
+ params_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), subscription_params: subscription_params}]
- assert %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ assert {:ok, [%Postgrex.Result{}]} =
+ Subscriptions.create(conn, "supabase_realtime_test", params_list, self(), self())
- params_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), params: %{"event" => "*", "schema" => "public"}}]
+ %Postgrex.Result{rows: [[1]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ end
- assert {:ok, [%Postgrex.Result{}]} =
- Subscriptions.create(conn, "supabase_realtime_test", params_list, self(), self())
+ test "create specific table all events", %{conn: conn} do
+ {:ok, subscription_params} = Subscriptions.parse_subscription_params(%{"schema" => "public", "table" => "test"})
- Process.sleep(500)
+ subscription_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), subscription_params: subscription_params}]
- params_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), params: %{"schema" => "public", "table" => "test"}}]
+ assert {:ok, [%Postgrex.Result{}]} =
+ Subscriptions.create(conn, "supabase_realtime_test", subscription_list, self(), self())
- assert {:ok, [%Postgrex.Result{}]} =
- Subscriptions.create(conn, "supabase_realtime_test", params_list, self(), self())
+ %Postgrex.Result{rows: [[1]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ end
- Process.sleep(500)
+ test "publication does not exist", %{conn: conn} do
+ {:ok, subscription_params} = Subscriptions.parse_subscription_params(%{"schema" => "public", "table" => "test"})
- params_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), params: %{}}]
+ subscription_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), subscription_params: subscription_params}]
- assert {:error,
- "No subscription params provided. Please provide at least a `schema` or `table` to subscribe to: %{}"} =
- Subscriptions.create(conn, "supabase_realtime_test", params_list, self(), self())
+ Postgrex.query!(conn, "drop publication if exists supabase_realtime_test", [])
- Process.sleep(500)
+ assert {:error,
+ {:subscription_insert_failed,
+ "Unable to subscribe to changes with given parameters. Please check Realtime is enabled for the given connect parameters: [schema: public, table: test, filters: []]"}} =
+ Subscriptions.create(conn, "supabase_realtime_test", subscription_list, self(), self())
- params_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), params: %{"user_token" => "potato"}}]
+ %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ end
- assert {:error,
- "No subscription params provided. Please provide at least a `schema` or `table` to subscribe to: "} =
- Subscriptions.create(conn, "supabase_realtime_test", params_list, self(), self())
+ test "table does not exist", %{conn: conn} do
+ {:ok, subscription_params} =
+ Subscriptions.parse_subscription_params(%{"schema" => "public", "table" => "doesnotexist"})
- Process.sleep(500)
+ subscription_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), subscription_params: subscription_params}]
- params_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), params: %{"auth_token" => "potato"}}]
+ assert {:error,
+ {:subscription_insert_failed,
+ "Unable to subscribe to changes with given parameters. Please check Realtime is enabled for the given connect parameters: [schema: public, table: doesnotexist, filters: []]"}} =
+ Subscriptions.create(conn, "supabase_realtime_test", subscription_list, self(), self())
- assert {:error,
- "No subscription params provided. Please provide at least a `schema` or `table` to subscribe to: "} =
- Subscriptions.create(conn, "supabase_realtime_test", params_list, self(), self())
+ %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ end
- Process.sleep(500)
+ test "column does not exist", %{conn: conn} do
+ {:ok, subscription_params} =
+ Subscriptions.parse_subscription_params(%{
+ "schema" => "public",
+ "table" => "test",
+ "filter" => "subject=eq.hey"
+ })
- %Postgrex.Result{rows: [[num]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
- assert num != 0
- end
+ subscription_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), subscription_params: subscription_params}]
- test "delete_all", %{conn: conn} do
- create_subscriptions(conn, 10)
- assert {:ok, %Postgrex.Result{}} = Subscriptions.delete_all(conn)
- assert %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
- end
+ assert {:error,
+ {:subscription_insert_failed,
+ "Unable to subscribe to changes with given parameters. An exception happened so please check your connect parameters: [schema: public, table: test, filters: [{\"subject\", \"eq\", \"hey\"}]]. Exception: ERROR P0001 (raise_exception) invalid column for filter subject"}} =
+ Subscriptions.create(conn, "supabase_realtime_test", subscription_list, self(), self())
- test "delete", %{conn: conn} do
- Subscriptions.delete_all(conn)
- id = UUID.uuid1()
- bin_id = UUID.string_to_binary!(id)
+ %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ end
- params_list = [%{id: id, claims: %{"role" => "anon"}, params: %{"event" => "*"}}]
- Subscriptions.create(conn, "supabase_realtime_test", params_list, self(), self())
- Process.sleep(500)
+ test "column type is wrong", %{conn: conn} do
+ {:ok, subscription_params} =
+ Subscriptions.parse_subscription_params(%{
+ "schema" => "public",
+ "table" => "test",
+ "filter" => "id=eq.hey"
+ })
- assert {:ok, %Postgrex.Result{}} = Subscriptions.delete(conn, bin_id)
- assert %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ subscription_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), subscription_params: subscription_params}]
+
+ assert {:error,
+ {:subscription_insert_failed,
+ "Unable to subscribe to changes with given parameters. An exception happened so please check your connect parameters: [schema: public, table: test, filters: [{\"id\", \"eq\", \"hey\"}]]. Exception: ERROR 22P02 (invalid_text_representation) invalid input syntax for type integer: \"hey\""}} =
+ Subscriptions.create(conn, "supabase_realtime_test", subscription_list, self(), self())
+
+ %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ end
+
+ test "connection error" do
+ {:ok, subscription_params} =
+ Subscriptions.parse_subscription_params(%{"schema" => "public", "table" => "test"})
+
+ subscription_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), subscription_params: subscription_params}]
+ conn = spawn(fn -> :ok end)
+
+ assert {:error, {:exit, _}} =
+ Subscriptions.create(conn, "supabase_realtime_test", subscription_list, self(), self())
+ end
+
+ test "timeout", %{conn: conn} do
+ {:ok, subscription_params} = Subscriptions.parse_subscription_params(%{"schema" => "public", "table" => "test"})
+
+ Task.start(fn -> Postgrex.query!(conn, "SELECT pg_sleep(20)", []) end)
+
+ subscription_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), subscription_params: subscription_params}]
+
+ assert {:error, %DBConnection.ConnectionError{reason: :queue_timeout}} =
+ Subscriptions.create(conn, "supabase_realtime_test", subscription_list, self(), self())
+ end
end
- test "delete_multi", %{conn: conn} do
- Subscriptions.delete_all(conn)
- id1 = UUID.uuid1()
- id2 = UUID.uuid1()
+ describe "delete_all/1" do
+ test "delete_all", %{conn: conn} do
+ create_subscriptions(conn, 10)
+ assert {:ok, %Postgrex.Result{}} = Subscriptions.delete_all(conn)
+ assert %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ end
+ end
- bin_id2 = UUID.string_to_binary!(id2)
- bin_id1 = UUID.string_to_binary!(id1)
+ describe "delete/2" do
+ test "delete", %{conn: conn} do
+ id = UUID.uuid1()
+ bin_id = UUID.string_to_binary!(id)
- params_list = [
- %{claims: %{"role" => "anon"}, id: id1, params: %{"event" => "*"}},
- %{claims: %{"role" => "anon"}, id: id2, params: %{"event" => "*"}}
- ]
+ {:ok, subscription_params} =
+ Subscriptions.parse_subscription_params(%{
+ "schema" => "public",
+ "table" => "test",
+ "filter" => "id=eq.hey"
+ })
- Subscriptions.create(conn, "supabase_realtime_test", params_list, self(), self())
- Process.sleep(500)
+ subscription_list = [%{claims: %{"role" => "anon"}, id: id, subscription_params: subscription_params}]
+ Subscriptions.create(conn, "supabase_realtime_test", subscription_list, self(), self())
- assert {:ok, %Postgrex.Result{}} = Subscriptions.delete_multi(conn, [bin_id1, bin_id2])
- assert %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ assert {:ok, %Postgrex.Result{}} = Subscriptions.delete(conn, bin_id)
+ assert %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ end
end
- test "maybe_delete_all", %{conn: conn} do
- Subscriptions.delete_all(conn)
- create_subscriptions(conn, 10)
+ describe "delete_multi/2" do
+ test "delete_multi", %{conn: conn} do
+ Subscriptions.delete_all(conn)
+ id1 = UUID.uuid1()
+ id2 = UUID.uuid1()
+
+ bin_id2 = UUID.string_to_binary!(id2)
+ bin_id1 = UUID.string_to_binary!(id1)
+
+ {:ok, subscription_params} =
+ Subscriptions.parse_subscription_params(%{
+ "schema" => "public",
+ "table" => "test",
+ "filter" => "id=eq.123"
+ })
+
+ subscription_list = [
+ %{claims: %{"role" => "anon"}, id: id1, subscription_params: subscription_params},
+ %{claims: %{"role" => "anon"}, id: id2, subscription_params: subscription_params}
+ ]
+
+ assert {:ok, _} = Subscriptions.create(conn, "supabase_realtime_test", subscription_list, self(), self())
+
+ assert %Postgrex.Result{rows: [[2]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ assert {:ok, %Postgrex.Result{}} = Subscriptions.delete_multi(conn, [bin_id1, bin_id2])
+ assert %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ end
+ end
- assert {:ok, %Postgrex.Result{}} = Subscriptions.maybe_delete_all(conn)
- assert %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ describe "maybe_delete_all/1" do
+ test "maybe_delete_all", %{conn: conn} do
+ Subscriptions.delete_all(conn)
+ create_subscriptions(conn, 10)
+
+ assert {:ok, %Postgrex.Result{}} = Subscriptions.maybe_delete_all(conn)
+ assert %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ end
end
- test "fetch_publication_tables", %{conn: conn} do
- tables = Subscriptions.fetch_publication_tables(conn, "supabase_realtime_test")
- assert tables[{"*"}] != nil
+ describe "fetch_publication_tables/2" do
+ test "fetch_publication_tables", %{conn: conn} do
+ tables = Subscriptions.fetch_publication_tables(conn, "supabase_realtime_test")
+ assert tables[{"*"}] != nil
+ end
end
defp create_subscriptions(conn, num) do
@@ -131,13 +219,12 @@ defmodule Realtime.Extensionsubscriptions.CdcRlsSubscriptionsTest do
"role" => "anon"
},
id: UUID.uuid1(),
- params: %{"event" => "*", "schema" => "public"}
+ subscription_params: {"public", "*", []}
}
| acc
]
end)
Subscriptions.create(conn, "supabase_realtime_test", params_list, self(), self())
- Process.sleep(500)
end
end
diff --git a/test/realtime/gen_rpc_pub_sub/worker_test.exs b/test/realtime/gen_rpc_pub_sub/worker_test.exs
new file mode 100644
index 000000000..880fa5132
--- /dev/null
+++ b/test/realtime/gen_rpc_pub_sub/worker_test.exs
@@ -0,0 +1,71 @@
+defmodule Realtime.GenRpcPubSub.WorkerTest do
+ use ExUnit.Case, async: true
+ alias Realtime.GenRpcPubSub.Worker
+ alias Realtime.GenRpc
+ alias Realtime.Nodes
+
+ use Mimic
+
+ @topic "test_topic"
+
+ setup do
+ worker = start_link_supervised!({Worker, {Realtime.PubSub, __MODULE__}})
+ %{worker: worker}
+ end
+
+ describe "forward to local" do
+ test "local broadcast", %{worker: worker} do
+ :ok = Phoenix.PubSub.subscribe(Realtime.PubSub, @topic)
+ send(worker, Worker.forward_to_local(@topic, "le message", Phoenix.PubSub))
+
+ assert_receive "le message"
+ refute_receive _any
+ end
+ end
+
+ describe "forward to region" do
+ setup %{worker: worker} do
+ GenRpc
+ |> stub()
+ |> allow(self(), worker)
+
+ Nodes
+ |> stub()
+ |> allow(self(), worker)
+
+ :ok
+ end
+
+ test "local broadcast + forward to other nodes", %{worker: worker} do
+ parent = self()
+ expect(Nodes, :region_nodes, fn "us-east-1" -> [node(), :node_us_2, :node_us_3] end)
+
+ expect(GenRpc, :abcast, fn [:node_us_2, :node_us_3],
+ Realtime.GenRpcPubSub.WorkerTest,
+ {:ftl, "test_topic", "le message", Phoenix.PubSub},
+ [] ->
+ send(parent, :abcast_called)
+ :ok
+ end)
+
+ :ok = Phoenix.PubSub.subscribe(Realtime.PubSub, @topic)
+ send(worker, Worker.forward_to_region(@topic, "le message", Phoenix.PubSub))
+
+ assert_receive "le message"
+ assert_receive :abcast_called
+ refute_receive _any
+ end
+
+ test "local broadcast and no other nodes", %{worker: worker} do
+ expect(Nodes, :region_nodes, fn "us-east-1" -> [node()] end)
+
+ reject(GenRpc, :abcast, 4)
+
+ :ok = Phoenix.PubSub.subscribe(Realtime.PubSub, @topic)
+ send(worker, Worker.forward_to_region(@topic, "le message", Phoenix.PubSub))
+
+ assert_receive "le message"
+ refute_receive _any
+ end
+ end
+end
diff --git a/test/realtime/gen_rpc_pub_sub_test.exs b/test/realtime/gen_rpc_pub_sub_test.exs
new file mode 100644
index 000000000..4c5ded562
--- /dev/null
+++ b/test/realtime/gen_rpc_pub_sub_test.exs
@@ -0,0 +1,126 @@
+Application.put_env(:phoenix_pubsub, :test_adapter, {Realtime.GenRpcPubSub, []})
+Code.require_file("../../deps/phoenix_pubsub/test/shared/pubsub_test.exs", __DIR__)
+
+defmodule Realtime.GenRpcPubSubTest do
+ # Application env being changed
+ use ExUnit.Case, async: false
+
+ test "it sets off_heap message_queue_data flag on the workers" do
+ assert Realtime.PubSubElixir.Realtime.PubSub.Adapter_1
+ |> Process.whereis()
+ |> Process.info(:message_queue_data) == {:message_queue_data, :off_heap}
+ end
+
+ test "it sets fullsweep_after flag on the workers" do
+ assert Realtime.PubSubElixir.Realtime.PubSub.Adapter_1
+ |> Process.whereis()
+ |> Process.info(:fullsweep_after) == {:fullsweep_after, 20}
+ end
+
+ @aux_mod (quote do
+ defmodule Subscriber do
+ # Relay messages to testing node
+ def subscribe(subscriber, topic) do
+ spawn(fn ->
+ RealtimeWeb.Endpoint.subscribe(topic)
+ 2 = length(Realtime.Nodes.region_nodes("us-east-1"))
+ 2 = length(Realtime.Nodes.region_nodes("ap-southeast-2"))
+ send(subscriber, {:ready, Application.get_env(:realtime, :region)})
+
+ loop = fn f ->
+ receive do
+ msg -> send(subscriber, {:relay, node(), msg})
+ end
+
+ f.(f)
+ end
+
+ loop.(loop)
+ end)
+ end
+ end
+ end)
+
+ Code.eval_quoted(@aux_mod)
+
+ @topic "gen-rpc-pub-sub-test-topic"
+
+ for regional_broadcasting <- [true, false] do
+ describe "regional balancing = #{regional_broadcasting}" do
+ setup do
+ previous_region = Application.get_env(:realtime, :region)
+ Application.put_env(:realtime, :region, "us-east-1")
+ on_exit(fn -> Application.put_env(:realtime, :region, previous_region) end)
+
+ previous_regional_broadcast = Application.get_env(:realtime, :regional_broadcasting)
+ Application.put_env(:realtime, :regional_broadcasting, unquote(regional_broadcasting))
+ on_exit(fn -> Application.put_env(:realtime, :regional_broadcasting, previous_regional_broadcast) end)
+
+ :ok
+ end
+
+ @describetag regional_broadcasting: regional_broadcasting
+
+ test "all messages are received" do
+ # start 1 node in us-east-1 to test my region broadcasting
+ # start 2 nodes in ap-southeast-2 to test other region broadcasting
+
+ us_node = :us_node
+ ap2_nodeX = :ap2_nodeX
+ ap2_nodeY = :ap2_nodeY
+
+ # Avoid port collision
+ client_config_per_node = %{
+ :"main@127.0.0.1" => 5969,
+ :"#{us_node}@127.0.0.1" => 16970,
+ :"#{ap2_nodeX}@127.0.0.1" => 16971,
+ :"#{ap2_nodeY}@127.0.0.1" => 16972
+ }
+
+ extra_config = [{:gen_rpc, :client_config_per_node, {:internal, client_config_per_node}}]
+
+ on_exit(fn -> Application.put_env(:gen_rpc, :client_config_per_node, {:internal, %{}}) end)
+ Application.put_env(:gen_rpc, :client_config_per_node, {:internal, client_config_per_node})
+
+ us_extra_config =
+ [{:realtime, :region, "us-east-1"}, {:gen_rpc, :tcp_server_port, 16970}] ++ extra_config
+
+ {:ok, _} = Clustered.start(@aux_mod, name: us_node, extra_config: us_extra_config, phoenix_port: 4014)
+
+ ap2_nodeX_extra_config =
+ [{:realtime, :region, "ap-southeast-2"}, {:gen_rpc, :tcp_server_port, 16971}] ++ extra_config
+
+ {:ok, _} = Clustered.start(@aux_mod, name: ap2_nodeX, extra_config: ap2_nodeX_extra_config, phoenix_port: 4015)
+
+ ap2_nodeY_extra_config =
+ [{:realtime, :region, "ap-southeast-2"}, {:gen_rpc, :tcp_server_port, 16972}] ++ extra_config
+
+ {:ok, _} = Clustered.start(@aux_mod, name: ap2_nodeY, extra_config: ap2_nodeY_extra_config, phoenix_port: 4016)
+
+ # Ensuring that syn had enough time to propagate to all nodes the group information
+ Process.sleep(3000)
+
+ RealtimeWeb.Endpoint.subscribe(@topic)
+ :erpc.multicall(Node.list(), Subscriber, :subscribe, [self(), @topic])
+
+ assert length(Realtime.Nodes.region_nodes("us-east-1")) == 2
+ assert length(Realtime.Nodes.region_nodes("ap-southeast-2")) == 2
+
+ assert_receive {:ready, "us-east-1"}
+ assert_receive {:ready, "ap-southeast-2"}
+ assert_receive {:ready, "ap-southeast-2"}
+
+ message = %Phoenix.Socket.Broadcast{topic: @topic, event: "an event", payload: ["a", %{"b" => "c"}, 1, 23]}
+ Phoenix.PubSub.broadcast(Realtime.PubSub, @topic, message)
+
+ assert_receive ^message
+
+ # Remote nodes received the broadcast
+ assert_receive {:relay, :"us_node@127.0.0.1", ^message}, 5000
+ assert_receive {:relay, :"ap2_nodeX@127.0.0.1", ^message}, 1000
+ assert_receive {:relay, :"ap2_nodeY@127.0.0.1", ^message}, 1000
+ refute_receive _any
+ end
+ end
+ end
+end
diff --git a/test/realtime/gen_rpc_test.exs b/test/realtime/gen_rpc_test.exs
index dd837aaf8..fbbd155f4 100644
--- a/test/realtime/gen_rpc_test.exs
+++ b/test/realtime/gen_rpc_test.exs
@@ -28,7 +28,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^current_node,
success: true,
- tenant: "123",
mechanism: :gen_rpc
}}
end
@@ -43,7 +42,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^current_node,
success: false,
- tenant: "123",
mechanism: :gen_rpc
}}
end
@@ -57,7 +55,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^node,
success: true,
- tenant: "123",
mechanism: :gen_rpc
}}
end
@@ -72,7 +69,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^node,
success: false,
- tenant: "123",
mechanism: :gen_rpc
}}
end
@@ -94,7 +90,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^current_node,
success: false,
- tenant: 123,
mechanism: :gen_rpc
}}
end
@@ -116,7 +111,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^node,
success: false,
- tenant: 123,
mechanism: :gen_rpc
}}
end
@@ -131,7 +125,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^current_node,
success: false,
- tenant: "123",
mechanism: :gen_rpc
}}
end
@@ -146,7 +139,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^node,
success: false,
- tenant: "123",
mechanism: :gen_rpc
}}
end
@@ -168,10 +160,101 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^node,
success: false,
- tenant: 123,
mechanism: :gen_rpc
}}
end
+
+ test "bad node" do
+ node = :"unknown@1.1.1.1"
+
+ log =
+ capture_log(fn ->
+ assert GenRpc.call(node, Map, :fetch, [%{a: 1}, :a], tenant_id: 123) == {:error, :rpc_error, :badnode}
+ end)
+
+ assert log =~
+ ~r/project=123 external_id=123 \[error\] ErrorOnRpcCall: %{+error: :badnode, mod: Map, func: :fetch, target: :"#{node}"/
+ end
+ end
+
+ describe "abcast/4" do
+ test "abcast to registered process", %{node: node} do
+ name =
+ System.unique_integer()
+ |> to_string()
+ |> String.to_atom()
+
+ :erlang.register(name, self())
+
+ # Use erpc to make the other node abcast to this one
+ :erpc.call(node, GenRpc, :abcast, [[node()], name, "a message", []])
+
+ assert_receive "a message"
+ refute_receive _any
+ end
+
+ @tag extra_config: [{:gen_rpc, :tcp_server_port, 9999}]
+ test "tcp error" do
+ Logger.put_process_level(self(), :debug)
+
+ log =
+ capture_log(fn ->
+ assert GenRpc.abcast(Node.list(), :some_process_name, "a message", []) == :ok
+ # We have to wait for gen_rpc logs to show up
+ Process.sleep(100)
+ end)
+
+ assert log =~ "[error] event=connect_to_remote_server"
+
+ refute_receive _any
+ end
+ end
+
+ describe "cast/5" do
+ test "apply on a local node" do
+ parent = self()
+
+ assert GenRpc.cast(node(), Kernel, :send, [parent, :sent]) == :ok
+
+ assert_receive :sent
+ refute_receive _any
+ end
+
+ test "apply on a remote node", %{node: node} do
+ parent = self()
+
+ assert GenRpc.cast(node, Kernel, :send, [parent, :sent]) == :ok
+
+ assert_receive :sent
+ refute_receive _any
+ end
+
+ test "bad node does nothing" do
+ node = :"unknown@1.1.1.1"
+
+ parent = self()
+
+ assert GenRpc.cast(node, Kernel, :send, [parent, :sent]) == :ok
+
+ refute_receive _any
+ end
+
+ @tag extra_config: [{:gen_rpc, :tcp_server_port, 9999}]
+ test "tcp error", %{node: node} do
+ parent = self()
+ Logger.put_process_level(self(), :debug)
+
+ log =
+ capture_log(fn ->
+ assert GenRpc.cast(node, Kernel, :send, [parent, :sent]) == :ok
+ # We have to wait for gen_rpc logs to show up
+ Process.sleep(100)
+ end)
+
+ assert log =~ "[error] event=connect_to_remote_server"
+
+ refute_receive _any
+ end
end
describe "multicast/4" do
@@ -223,7 +306,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^node,
success: true,
- tenant: "123",
mechanism: :gen_rpc
}}
@@ -232,7 +314,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^current_node,
success: true,
- tenant: "123",
mechanism: :gen_rpc
}}
end
@@ -259,7 +340,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^node,
success: false,
- tenant: 123,
mechanism: :gen_rpc
}}
@@ -268,7 +348,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^current_node,
success: false,
- tenant: 123,
mechanism: :gen_rpc
}}
end
@@ -293,7 +372,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^node,
success: false,
- tenant: 123,
mechanism: :gen_rpc
}}
@@ -302,7 +380,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^current_node,
success: true,
- tenant: 123,
mechanism: :gen_rpc
}}
end
diff --git a/test/realtime/messages_test.exs b/test/realtime/messages_test.exs
index 3bef9a5e0..5590adca9 100644
--- a/test/realtime/messages_test.exs
+++ b/test/realtime/messages_test.exs
@@ -1,10 +1,11 @@
defmodule Realtime.MessagesTest do
- use Realtime.DataCase, async: true
+ # usage of Clustered
+ use Realtime.DataCase, async: false
alias Realtime.Api.Message
alias Realtime.Database
alias Realtime.Messages
- alias Realtime.Repo
+ alias Realtime.Tenants.Repo
setup do
tenant = Containers.checkout_tenant(run_migrations: true)
@@ -13,35 +14,248 @@ defmodule Realtime.MessagesTest do
date_start = Date.utc_today() |> Date.add(-10)
date_end = Date.utc_today()
create_messages_partitions(conn, date_start, date_end)
+
+ on_exit(fn -> :telemetry.detach(__MODULE__) end)
+
+ :telemetry.attach(
+ __MODULE__,
+ [:realtime, :tenants, :replay],
+ &__MODULE__.handle_telemetry/4,
+ pid: self()
+ )
+
%{conn: conn, tenant: tenant, date_start: date_start, date_end: date_end}
end
- test "delete_old_messages/1 deletes messages older than 72 hours", %{
- conn: conn,
- tenant: tenant,
- date_start: date_start,
- date_end: date_end
- } do
- utc_now = NaiveDateTime.utc_now()
- limit = NaiveDateTime.add(utc_now, -72, :hour)
-
- messages =
- for date <- Date.range(date_start, date_end) do
- inserted_at = date |> NaiveDateTime.new!(Time.new!(0, 0, 0))
- message_fixture(tenant, %{inserted_at: inserted_at})
+ describe "replay/5" do
+ test "invalid replay params", %{tenant: tenant} do
+ assert Messages.replay(self(), tenant.external_id, "a topic", "not a number", 123) ==
+ {:error, :invalid_replay_params}
+
+ assert Messages.replay(self(), tenant.external_id, "a topic", 123, "not a number") ==
+ {:error, :invalid_replay_params}
+
+ assert Messages.replay(self(), tenant.external_id, "a topic", 253_402_300_800_000, 10) ==
+ {:error, :invalid_replay_params}
+ end
+
+ test "empty replay", %{conn: conn} do
+ assert Messages.replay(conn, "tenant_id", "test", 0, 10) == {:ok, [], MapSet.new()}
+ end
+
+ test "replay respects limit", %{conn: conn, tenant: tenant} do
+ external_id = tenant.external_id
+
+ m1 =
+ message_fixture(tenant, %{
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-1, :minute),
+ "event" => "new",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "private" => true,
+ "payload" => %{"value" => "new"}
+ })
+
+ message_fixture(tenant, %{
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-2, :minute),
+ "event" => "old",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "private" => true,
+ "payload" => %{"value" => "old"}
+ })
+
+ assert Messages.replay(conn, external_id, "test", 0, 1) == {:ok, [m1], MapSet.new([m1.id])}
+
+ assert_receive {
+ :telemetry,
+ [:realtime, :tenants, :replay],
+ %{latency: _},
+ %{tenant: ^external_id}
+ }
+ end
+
+ test "replay private topic only", %{conn: conn, tenant: tenant} do
+ privatem =
+ message_fixture(tenant, %{
+ "private" => true,
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-1, :minute),
+ "event" => "new",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "payload" => %{"value" => "new"}
+ })
+
+ message_fixture(tenant, %{
+ "private" => false,
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-2, :minute),
+ "event" => "old",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "payload" => %{"value" => "old"}
+ })
+
+ assert Messages.replay(conn, tenant.external_id, "test", 0, 10) == {:ok, [privatem], MapSet.new([privatem.id])}
+ end
+
+ test "replay extension=broadcast", %{conn: conn, tenant: tenant} do
+ privatem =
+ message_fixture(tenant, %{
+ "private" => true,
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-1, :minute),
+ "event" => "new",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "payload" => %{"value" => "new"}
+ })
+
+ message_fixture(tenant, %{
+ "private" => true,
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-2, :minute),
+ "event" => "old",
+ "extension" => "presence",
+ "topic" => "test",
+ "payload" => %{"value" => "old"}
+ })
+
+ assert Messages.replay(conn, tenant.external_id, "test", 0, 10) == {:ok, [privatem], MapSet.new([privatem.id])}
+ end
+
+ test "replay respects since", %{conn: conn, tenant: tenant} do
+ m1 =
+ message_fixture(tenant, %{
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-2, :minute),
+ "event" => "first",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "private" => true,
+ "payload" => %{"value" => "first"}
+ })
+
+ m2 =
+ message_fixture(tenant, %{
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-1, :minute),
+ "event" => "second",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "private" => true,
+ "payload" => %{"value" => "second"}
+ })
+
+ message_fixture(tenant, %{
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-10, :minute),
+ "event" => "old",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "private" => true,
+ "payload" => %{"value" => "old"}
+ })
+
+ since = DateTime.utc_now() |> DateTime.add(-3, :minute) |> DateTime.to_unix(:millisecond)
+
+ assert Messages.replay(conn, tenant.external_id, "test", since, 10) == {:ok, [m1, m2], MapSet.new([m1.id, m2.id])}
+ end
+
+ test "replay respects hard max limit of 25", %{conn: conn, tenant: tenant} do
+ for _i <- 1..30 do
+ message_fixture(tenant, %{
+ "inserted_at" => NaiveDateTime.utc_now(),
+ "event" => "event",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "private" => true,
+ "payload" => %{"value" => "message"}
+ })
end
- assert length(messages) == 11
+ assert {:ok, messages, set} = Messages.replay(conn, tenant.external_id, "test", 0, 30)
+ assert length(messages) == 25
+ assert MapSet.size(set) == 25
+ end
+
+ test "replay respects hard min limit of 1", %{conn: conn, tenant: tenant} do
+ message_fixture(tenant, %{
+ "inserted_at" => NaiveDateTime.utc_now(),
+ "event" => "event",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "private" => true,
+ "payload" => %{"value" => "message"}
+ })
+
+ assert {:ok, messages, set} = Messages.replay(conn, tenant.external_id, "test", 0, 0)
+ assert length(messages) == 1
+ assert MapSet.size(set) == 1
+ end
+
+ test "distributed replay", %{conn: conn, tenant: tenant} do
+ m =
+ message_fixture(tenant, %{
+ "inserted_at" => NaiveDateTime.utc_now(),
+ "event" => "event",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "private" => true,
+ "payload" => %{"value" => "message"}
+ })
+
+ {:ok, node} = Clustered.start()
+
+ # Call remote node passing the database connection that is local to this node
+ assert :erpc.call(node, Messages, :replay, [conn, tenant.external_id, "test", 0, 30]) ==
+ {:ok, [m], MapSet.new([m.id])}
+ end
- to_keep =
- Enum.reject(
- messages,
- &(NaiveDateTime.compare(limit, &1.inserted_at) == :gt)
- )
+ test "distributed replay error", %{tenant: tenant} do
+ message_fixture(tenant, %{
+ "inserted_at" => NaiveDateTime.utc_now(),
+ "event" => "event",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "private" => true,
+ "payload" => %{"value" => "message"}
+ })
- assert :ok = Messages.delete_old_messages(conn)
- {:ok, current} = Repo.all(conn, from(m in Message), Message)
+ {:ok, node} = Clustered.start()
- assert Enum.sort(current) == Enum.sort(to_keep)
+ # Call remote node passing the database connection that is local to this node
+ pid = spawn(fn -> :ok end)
+
+ assert :erpc.call(node, Messages, :replay, [pid, tenant.external_id, "test", 0, 30]) ==
+ {:error, :failed_to_replay_messages}
+ end
end
+
+ describe "delete_old_messages/1" do
+ test "delete_old_messages/1 deletes messages older than 72 hours", %{
+ conn: conn,
+ tenant: tenant,
+ date_start: date_start,
+ date_end: date_end
+ } do
+ utc_now = NaiveDateTime.utc_now()
+ limit = NaiveDateTime.add(utc_now, -72, :hour)
+
+ messages =
+ for date <- Date.range(date_start, date_end) do
+ inserted_at = date |> NaiveDateTime.new!(Time.new!(0, 0, 0))
+ message_fixture(tenant, %{inserted_at: inserted_at})
+ end
+
+ assert length(messages) == 11
+
+ to_keep =
+ Enum.reject(
+ messages,
+ &(NaiveDateTime.compare(NaiveDateTime.beginning_of_day(limit), &1.inserted_at) == :gt)
+ )
+
+ assert :ok = Messages.delete_old_messages(conn)
+ {:ok, current} = Repo.all(conn, from(m in Message), Message)
+
+ assert Enum.sort(current) == Enum.sort(to_keep)
+ end
+ end
+
+ def handle_telemetry(event, measures, metadata, pid: pid), do: send(pid, {:telemetry, event, measures, metadata})
end
diff --git a/test/realtime/metrics_cleaner_test.exs b/test/realtime/metrics_cleaner_test.exs
index fbe9d8515..a071f72b4 100644
--- a/test/realtime/metrics_cleaner_test.exs
+++ b/test/realtime/metrics_cleaner_test.exs
@@ -8,11 +8,9 @@ defmodule Realtime.MetricsCleanerTest do
setup do
interval = Application.get_env(:realtime, :metrics_cleaner_schedule_timer_in_ms)
Application.put_env(:realtime, :metrics_cleaner_schedule_timer_in_ms, 100)
- tenant = Containers.checkout_tenant(run_migrations: true)
+ on_exit(fn -> Application.put_env(:realtime, :metrics_cleaner_schedule_timer_in_ms, interval) end)
- on_exit(fn ->
- Application.put_env(:realtime, :metrics_cleaner_schedule_timer_in_ms, interval)
- end)
+ tenant = Containers.checkout_tenant(run_migrations: true)
%{tenant: tenant}
end
@@ -24,22 +22,30 @@ defmodule Realtime.MetricsCleanerTest do
# Wait for promex to collect the metrics
Process.sleep(6000)
- Realtime.Telemetry.execute(
+ :telemetry.execute(
[:realtime, :connections],
%{connected: 10, connected_cluster: 10, limit: 100},
%{tenant: external_id}
)
- assert Realtime.PromEx.Metrics
- |> :ets.select([{{{:_, %{tenant: :"$1"}}, :_}, [], [:"$1"]}])
- |> Enum.any?(&(&1 == external_id))
+ :telemetry.execute(
+ [:realtime, :connections],
+ %{connected: 20, connected_cluster: 20, limit: 100},
+ %{tenant: "disconnected-tenant"}
+ )
- Connect.shutdown(external_id)
+ metrics = Realtime.PromEx.get_metrics() |> IO.iodata_to_binary()
+
+ assert String.contains?(metrics, external_id)
+ assert String.contains?(metrics, "disconnected-tenant")
+
+      # Wait for cleanup to run
Process.sleep(200)
- refute Realtime.PromEx.Metrics
- |> :ets.select([{{{:_, %{tenant: :"$1"}}, :_}, [], [:"$1"]}])
- |> Enum.any?(&(&1 == external_id))
+ metrics = Realtime.PromEx.get_metrics() |> IO.iodata_to_binary()
+
+ assert String.contains?(metrics, external_id)
+ refute String.contains?(metrics, "disconnected-tenant")
end
end
end
diff --git a/test/realtime/monitoring/distributed_metrics_test.exs b/test/realtime/monitoring/distributed_metrics_test.exs
index 491083973..49fe4af6f 100644
--- a/test/realtime/monitoring/distributed_metrics_test.exs
+++ b/test/realtime/monitoring/distributed_metrics_test.exs
@@ -15,7 +15,7 @@ defmodule Realtime.DistributedMetricsTest do
^node => %{
pid: _pid,
port: _port,
- queue_size: {:ok, 0},
+ queue_size: {:ok, _},
state: :up,
inet_stats: [
recv_oct: _,
diff --git a/test/realtime/monitoring/erl_sys_mon_test.exs b/test/realtime/monitoring/erl_sys_mon_test.exs
index b1e122d58..e9c7b87b7 100644
--- a/test/realtime/monitoring/erl_sys_mon_test.exs
+++ b/test/realtime/monitoring/erl_sys_mon_test.exs
@@ -5,16 +5,25 @@ defmodule Realtime.Monitoring.ErlSysMonTest do
describe "system monitoring" do
test "logs system monitor events" do
- start_supervised!({ErlSysMon, config: [{:long_message_queue, {1, 10}}]})
+ start_supervised!({ErlSysMon, config: [{:long_message_queue, {1, 100}}]})
- assert capture_log(fn ->
- Task.async(fn ->
- Enum.map(1..1000, &send(self(), &1))
- # Wait for ErlSysMon to notice
- Process.sleep(4000)
- end)
- |> Task.await()
- end) =~ "Realtime.ErlSysMon message:"
+ log =
+ capture_log(fn ->
+ Task.async(fn ->
+ Process.register(self(), TestProcess)
+ Enum.map(1..1000, &send(self(), &1))
+ # Wait for ErlSysMon to notice
+ Process.sleep(4000)
+ end)
+ |> Task.await()
+ end)
+
+ assert log =~ "Realtime.ErlSysMon message:"
+ assert log =~ "$initial_call\", {Realtime.Monitoring.ErlSysMonTest"
+ assert log =~ "ancestors\", [#{inspect(self())}]"
+ assert log =~ "registered_name: TestProcess"
+ assert log =~ "message_queue_len: "
+ assert log =~ "total_heap_size: "
end
end
end
diff --git a/test/realtime/monitoring/prom_ex/plugins/distributed_test.exs b/test/realtime/monitoring/prom_ex/plugins/distributed_test.exs
index ff4c4f098..731873066 100644
--- a/test/realtime/monitoring/prom_ex/plugins/distributed_test.exs
+++ b/test/realtime/monitoring/prom_ex/plugins/distributed_test.exs
@@ -23,55 +23,41 @@ defmodule Realtime.PromEx.Plugins.DistributedTest do
describe "pooling metrics" do
setup do
- metrics =
- PromEx.get_metrics(MetricsTest)
- |> String.split("\n", trim: true)
-
- %{metrics: metrics}
+ %{metrics: PromEx.get_metrics(MetricsTest)}
end
test "send_pending_bytes", %{metrics: metrics, node: node} do
- pattern = ~r/dist_send_pending_bytes{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?\d+)/
- assert metric_value(metrics, pattern) == 0
+ assert metric_value(metrics, "dist_send_pending_bytes", origin_node: node(), target_node: node) == 0
end
test "send_count", %{metrics: metrics, node: node} do
- pattern = ~r/dist_send_count{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?\d+)/
- assert metric_value(metrics, pattern) > 0
+ value = metric_value(metrics, "dist_send_count", origin_node: node(), target_node: node)
+ assert is_integer(value)
+ assert value > 0
end
test "send_bytes", %{metrics: metrics, node: node} do
- pattern = ~r/dist_send_bytes{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?\d+)/
- assert metric_value(metrics, pattern) > 0
+ value = metric_value(metrics, "dist_send_bytes", origin_node: node(), target_node: node)
+ assert is_integer(value)
+ assert value > 0
end
test "recv_count", %{metrics: metrics, node: node} do
- pattern = ~r/dist_recv_count{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?\d+)/
- assert metric_value(metrics, pattern) > 0
+ value = metric_value(metrics, "dist_recv_count", origin_node: node(), target_node: node)
+ assert is_integer(value)
+ assert value > 0
end
test "recv_bytes", %{metrics: metrics, node: node} do
- pattern = ~r/dist_recv_bytes{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?\d+)/
- assert metric_value(metrics, pattern) > 0
+ value = metric_value(metrics, "dist_recv_bytes", origin_node: node(), target_node: node)
+ assert is_integer(value)
+ assert value > 0
end
test "queue_size", %{metrics: metrics, node: node} do
- pattern = ~r/dist_queue_size{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?\d+)/
- assert is_integer(metric_value(metrics, pattern))
+ assert is_integer(metric_value(metrics, "dist_queue_size", origin_node: node(), target_node: node))
end
end
- defp metric_value(metrics, pattern) do
- metrics
- |> Enum.find_value(
- "0",
- fn item ->
- case Regex.run(pattern, item, capture: ["number"]) do
- [number] -> number
- _ -> false
- end
- end
- )
- |> String.to_integer()
- end
+ defp metric_value(metrics, metric, expected_tags), do: MetricsHelper.search(metrics, metric, expected_tags)
end
diff --git a/test/realtime/monitoring/prom_ex/plugins/gen_rpc_test.exs b/test/realtime/monitoring/prom_ex/plugins/gen_rpc_test.exs
index 25d8fae16..5396aae6b 100644
--- a/test/realtime/monitoring/prom_ex/plugins/gen_rpc_test.exs
+++ b/test/realtime/monitoring/prom_ex/plugins/gen_rpc_test.exs
@@ -23,55 +23,42 @@ defmodule Realtime.PromEx.Plugins.GenRpcTest do
describe "pooling metrics" do
setup do
- metrics =
- PromEx.get_metrics(MetricsTest)
- |> String.split("\n", trim: true)
-
- %{metrics: metrics}
+ %{metrics: PromEx.get_metrics(MetricsTest)}
end
test "send_pending_bytes", %{metrics: metrics, node: node} do
- pattern = ~r/gen_rpc_send_pending_bytes{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?\d+)/
- assert metric_value(metrics, pattern) == 0
+ assert metric_value(metrics, "gen_rpc_send_pending_bytes", origin_node: node(), target_node: node) == 0
end
test "send_count", %{metrics: metrics, node: node} do
- pattern = ~r/gen_rpc_send_count{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?\d+)/
- assert metric_value(metrics, pattern) > 0
+ value = metric_value(metrics, "gen_rpc_send_count", origin_node: node(), target_node: node)
+ assert is_integer(value)
+ assert value > 0
end
test "send_bytes", %{metrics: metrics, node: node} do
- pattern = ~r/gen_rpc_send_bytes{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?\d+)/
- assert metric_value(metrics, pattern) > 0
+ value = metric_value(metrics, "gen_rpc_send_bytes", origin_node: node(), target_node: node)
+ assert is_integer(value)
+ assert value > 0
end
test "recv_count", %{metrics: metrics, node: node} do
- pattern = ~r/gen_rpc_recv_count{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?\d+)/
- assert metric_value(metrics, pattern) > 0
+ value = metric_value(metrics, "gen_rpc_recv_count", origin_node: node(), target_node: node)
+ assert is_integer(value)
+ assert value > 0
end
test "recv_bytes", %{metrics: metrics, node: node} do
- pattern = ~r/gen_rpc_recv_bytes{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?\d+)/
- assert metric_value(metrics, pattern) > 0
+ value = metric_value(metrics, "gen_rpc_recv_bytes", origin_node: node(), target_node: node)
+ assert is_integer(value)
+ assert value > 0
end
test "queue_size", %{metrics: metrics, node: node} do
- pattern = ~r/gen_rpc_queue_size_bytes{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?\d+)/
- assert metric_value(metrics, pattern) == 0
+ value = metric_value(metrics, "gen_rpc_queue_size_bytes", origin_node: node(), target_node: node)
+ assert is_integer(value)
end
end
- defp metric_value(metrics, pattern) do
- metrics
- |> Enum.find_value(
- "0",
- fn item ->
- case Regex.run(pattern, item, capture: ["number"]) do
- [number] -> number
- _ -> false
- end
- end
- )
- |> String.to_integer()
- end
+ defp metric_value(metrics, metric, expected_tags), do: MetricsHelper.search(metrics, metric, expected_tags)
end
diff --git a/test/realtime/monitoring/prom_ex/plugins/phoenix_test.exs b/test/realtime/monitoring/prom_ex/plugins/phoenix_test.exs
index a73e6e2f5..fefde8dc3 100644
--- a/test/realtime/monitoring/prom_ex/plugins/phoenix_test.exs
+++ b/test/realtime/monitoring/prom_ex/plugins/phoenix_test.exs
@@ -1,6 +1,7 @@
defmodule Realtime.PromEx.Plugins.PhoenixTest do
use Realtime.DataCase, async: false
alias Realtime.PromEx.Plugins
+ alias Realtime.Integration.WebsocketClient
defmodule MetricsTest do
use PromEx, otp_app: :realtime_test_phoenix
@@ -10,34 +11,79 @@ defmodule Realtime.PromEx.Plugins.PhoenixTest do
end
end
+ setup_all do
+ start_supervised!(MetricsTest)
+ :ok
+ end
+
+ setup do
+ %{tenant: Containers.checkout_tenant(run_migrations: true)}
+ end
+
describe "pooling metrics" do
- setup do
- start_supervised!(MetricsTest)
- :ok
+ test "number of connections", %{tenant: tenant} do
+ {:ok, token} = token_valid(tenant, "anon", %{})
+
+ {:ok, _} =
+ WebsocketClient.connect(
+ self(),
+ uri(tenant, Phoenix.Socket.V1.JSONSerializer, 4002),
+ Phoenix.Socket.V1.JSONSerializer,
+ [{"x-api-key", token}]
+ )
+
+ {:ok, _} =
+ WebsocketClient.connect(
+ self(),
+ uri(tenant, Phoenix.Socket.V1.JSONSerializer, 4002),
+ Phoenix.Socket.V1.JSONSerializer,
+ [{"x-api-key", token}]
+ )
+
+ Process.sleep(200)
+ assert metric_value("phoenix_connections_total") >= 2
end
+ end
+
+ describe "event metrics" do
+ test "socket connected", %{tenant: tenant} do
+ {:ok, token} = token_valid(tenant, "anon", %{})
- test "number of connections" do
- # Trigger a connection by making a request to the endpoint
- url = RealtimeWeb.Endpoint.url() <> "/healthcheck"
- Req.get!(url)
+ {:ok, _} =
+ WebsocketClient.connect(
+ self(),
+ uri(tenant, Phoenix.Socket.V1.JSONSerializer, 4002),
+ Phoenix.Socket.V1.JSONSerializer,
+ [{"x-api-key", token}]
+ )
+
+ {:ok, _} =
+ WebsocketClient.connect(
+ self(),
+ uri(tenant, RealtimeWeb.Socket.V2Serializer, 4002),
+ RealtimeWeb.Socket.V2Serializer,
+ [{"x-api-key", token}]
+ )
Process.sleep(200)
- assert metric_value() > 0
+
+ assert metric_value("phoenix_socket_connected_duration_milliseconds_count",
+ endpoint: "RealtimeWeb.Endpoint",
+ result: "ok",
+ serializer: "Elixir.Phoenix.Socket.V1.JSONSerializer",
+ transport: "websocket"
+ ) >= 1
+
+ assert metric_value("phoenix_socket_connected_duration_milliseconds_count",
+ endpoint: "RealtimeWeb.Endpoint",
+ result: "ok",
+ serializer: "Elixir.RealtimeWeb.Socket.V2Serializer",
+ transport: "websocket"
+ ) >= 1
end
end
- defp metric_value() do
- PromEx.get_metrics(MetricsTest)
- |> String.split("\n", trim: true)
- |> Enum.find_value(
- "0",
- fn item ->
- case Regex.run(~r/phoenix_connections_total\s(?\d+)/, item, capture: ["number"]) do
- [number] -> number
- _ -> false
- end
- end
- )
- |> String.to_integer()
+ defp metric_value(metric, expected_tags \\ nil) do
+ MetricsHelper.search(PromEx.get_metrics(MetricsTest), metric, expected_tags)
end
end
diff --git a/test/realtime/monitoring/prom_ex/plugins/tenant_test.exs b/test/realtime/monitoring/prom_ex/plugins/tenant_test.exs
index 164c8d2eb..84ca9b1fb 100644
--- a/test/realtime/monitoring/prom_ex/plugins/tenant_test.exs
+++ b/test/realtime/monitoring/prom_ex/plugins/tenant_test.exs
@@ -1,12 +1,14 @@
defmodule Realtime.PromEx.Plugins.TenantTest do
- alias Realtime.Tenants.Authorization.Policies
use Realtime.DataCase, async: false
alias Realtime.PromEx.Plugins.Tenant
alias Realtime.Rpc
- alias Realtime.UsersCounter
- alias Realtime.Tenants.Authorization.Policies
alias Realtime.Tenants.Authorization
+ alias Realtime.Tenants.Authorization.Policies
+ alias Realtime.Tenants.Authorization.Policies
+ alias Realtime.UsersCounter
+ alias Realtime.RateCounter
+ alias Realtime.GenCounter
defmodule MetricsTest do
use PromEx, otp_app: :realtime_test_phoenix
@@ -15,6 +17,11 @@ defmodule Realtime.PromEx.Plugins.TenantTest do
def plugins, do: [{Tenant, poll_rate: 50}]
end
+ setup_all do
+ start_supervised!(MetricsTest)
+ :ok
+ end
+
def handle_telemetry(event, metadata, content, pid: pid), do: send(pid, {event, metadata, content})
@aux_mod (quote do
@@ -24,45 +31,52 @@ defmodule Realtime.PromEx.Plugins.TenantTest do
end
def fake_db_event(external_id) do
- external_id
- |> Realtime.Tenants.db_events_per_second_rate()
- |> Realtime.RateCounter.new()
+ rate = Realtime.Tenants.db_events_per_second_rate(external_id, 100)
- external_id
- |> Realtime.Tenants.db_events_per_second_key()
- |> Realtime.GenCounter.add()
+ rate
+ |> tap(&RateCounter.new(&1))
+ |> tap(&GenCounter.add(&1.id))
+ |> RateCounterHelper.tick!()
end
def fake_event(external_id) do
- external_id
- |> Realtime.Tenants.events_per_second_rate(123)
- |> Realtime.RateCounter.new()
+ rate = Realtime.Tenants.events_per_second_rate(external_id, 123)
- external_id
- |> Realtime.Tenants.events_per_second_key()
- |> Realtime.GenCounter.add()
+ rate
+ |> tap(&RateCounter.new(&1))
+ |> tap(&GenCounter.add(&1.id))
+ |> RateCounterHelper.tick!()
end
def fake_presence_event(external_id) do
- external_id
- |> Realtime.Tenants.presence_events_per_second_rate(123)
- |> Realtime.RateCounter.new()
+ rate = Realtime.Tenants.presence_events_per_second_rate(external_id, 123)
- external_id
- |> Realtime.Tenants.presence_events_per_second_key()
- |> Realtime.GenCounter.add()
+ rate
+ |> tap(&RateCounter.new(&1))
+ |> tap(&GenCounter.add(&1.id))
+ |> RateCounterHelper.tick!()
end
def fake_broadcast_from_database(external_id) do
Realtime.Telemetry.execute(
[:realtime, :tenants, :broadcast_from_database],
%{
- latency_committed_at: 10,
- latency_inserted_at: 1
+ # millisecond
+ latency_committed_at: 9,
+ # microsecond
+ latency_inserted_at: 9000
},
%{tenant: external_id}
)
end
+
+ def fake_input_bytes(external_id) do
+ Realtime.Telemetry.execute([:realtime, :channel, :input_bytes], %{size: 10}, %{tenant: external_id})
+ end
+
+ def fake_output_bytes(external_id) do
+ Realtime.Telemetry.execute([:realtime, :channel, :output_bytes], %{size: 10}, %{tenant: external_id})
+ end
end
end)
@@ -75,6 +89,7 @@ defmodule Realtime.PromEx.Plugins.TenantTest do
on_exit(fn -> :telemetry.detach(__MODULE__) end)
+ {:ok, _} = Realtime.Tenants.Connect.lookup_or_start_connection(tenant.external_id)
{:ok, node} = Clustered.start(@aux_mod)
%{tenant: tenant, node: node}
end
@@ -85,16 +100,19 @@ defmodule Realtime.PromEx.Plugins.TenantTest do
} do
UsersCounter.add(self(), external_id)
# Add bad tenant id
- UsersCounter.add(self(), random_string())
+ bad_tenant_id = random_string()
+ UsersCounter.add(self(), bad_tenant_id)
_ = Rpc.call(node, FakeUserCounter, :fake_add, [external_id])
+
Process.sleep(500)
Tenant.execute_tenant_metrics()
assert_receive {[:realtime, :connections], %{connected: 1, limit: 200, connected_cluster: 2},
%{tenant: ^external_id}}
- refute_receive :_
+ refute_receive {[:realtime, :connections], %{connected: 1, limit: 200, connected_cluster: 2},
+ %{tenant: ^bad_tenant_id}}
end
end
@@ -113,47 +131,59 @@ defmodule Realtime.PromEx.Plugins.TenantTest do
role: "anon"
})
- start_supervised!(MetricsTest)
-
%{authorization_context: authorization_context, db_conn: db_conn, tenant: tenant}
end
test "event exists after counter added", %{tenant: %{external_id: external_id}} do
- pattern =
- ~r/realtime_channel_events{tenant="#{external_id}"}\s(?\d+)/
+ metric_value = metric_value("realtime_channel_events", tenant: external_id) || 0
+ FakeUserCounter.fake_event(external_id)
+
+ Process.sleep(100)
+ assert metric_value("realtime_channel_events", tenant: external_id) == metric_value + 1
+ end
+
+ test "global event exists after counter added", %{tenant: %{external_id: external_id}} do
+ metric_value = metric_value("realtime_channel_global_events") || 0
- metric_value = metric_value(pattern)
FakeUserCounter.fake_event(external_id)
- Process.sleep(200)
- assert metric_value(pattern) == metric_value + 1
+ Process.sleep(100)
+ assert metric_value("realtime_channel_global_events") == metric_value + 1
end
test "db_event exists after counter added", %{tenant: %{external_id: external_id}} do
- pattern =
- ~r/realtime_channel_db_events{tenant="#{external_id}"}\s(?\d+)/
+ metric_value = metric_value("realtime_channel_db_events", tenant: external_id) || 0
+ FakeUserCounter.fake_db_event(external_id)
+ Process.sleep(100)
+ assert metric_value("realtime_channel_db_events", tenant: external_id) == metric_value + 1
+ end
+
+ test "global db_event exists after counter added", %{tenant: %{external_id: external_id}} do
+ metric_value = metric_value("realtime_channel_global_db_events") || 0
- metric_value = metric_value(pattern)
FakeUserCounter.fake_db_event(external_id)
- Process.sleep(200)
- assert metric_value(pattern) == metric_value + 1
+ Process.sleep(100)
+ assert metric_value("realtime_channel_global_db_events") == metric_value + 1
end
test "presence_event exists after counter added", %{tenant: %{external_id: external_id}} do
- pattern =
- ~r/realtime_channel_presence_events{tenant="#{external_id}"}\s(?\d+)/
+ metric_value = metric_value("realtime_channel_presence_events", tenant: external_id) || 0
- metric_value = metric_value(pattern)
FakeUserCounter.fake_presence_event(external_id)
- Process.sleep(200)
- assert metric_value(pattern) == metric_value + 1
+ Process.sleep(100)
+ assert metric_value("realtime_channel_presence_events", tenant: external_id) == metric_value + 1
end
- test "metric read_authorization_check exists after check", context do
- pattern =
- ~r/realtime_tenants_read_authorization_check_count{tenant="#{context.tenant.external_id}"}\s(?\d+)/
+ test "global presence_event exists after counter added", %{tenant: %{external_id: external_id}} do
+ metric_value = metric_value("realtime_channel_global_presence_events") || 0
+ FakeUserCounter.fake_presence_event(external_id)
+ Process.sleep(100)
+ assert metric_value("realtime_channel_global_presence_events") == metric_value + 1
+ end
- metric_value = metric_value(pattern)
+ test "metric read_authorization_check exists after check", context do
+ metric = "realtime_tenants_read_authorization_check_count"
+ metric_value = metric_value(metric, tenant: context.tenant.external_id) || 0
{:ok, _} =
Authorization.get_read_authorizations(
@@ -164,19 +194,17 @@ defmodule Realtime.PromEx.Plugins.TenantTest do
Process.sleep(200)
- assert metric_value(pattern) == metric_value + 1
-
- bucket_pattern =
- ~r/realtime_tenants_read_authorization_check_bucket{tenant="#{context.tenant.external_id}",le="250"}\s(?\d+)/
+ assert metric_value(metric, tenant: context.tenant.external_id) == metric_value + 1
- assert metric_value(bucket_pattern) > 0
+ assert metric_value("realtime_tenants_read_authorization_check_bucket",
+ tenant: context.tenant.external_id,
+ le: "250.0"
+ ) > 0
end
test "metric write_authorization_check exists after check", context do
- pattern =
- ~r/realtime_tenants_write_authorization_check_count{tenant="#{context.tenant.external_id}"}\s(?\d+)/
-
- metric_value = metric_value(pattern)
+ metric = "realtime_tenants_write_authorization_check_count"
+ metric_value = metric_value(metric, tenant: context.tenant.external_id) || 0
{:ok, _} =
Authorization.get_write_authorizations(
@@ -188,96 +216,110 @@ defmodule Realtime.PromEx.Plugins.TenantTest do
# Wait enough time for the poll rate to be triggered at least once
Process.sleep(200)
- assert metric_value(pattern) == metric_value + 1
+ assert metric_value(metric, tenant: context.tenant.external_id) == metric_value + 1
+
+ assert metric_value("realtime_tenants_write_authorization_check_bucket",
+ tenant: context.tenant.external_id,
+ le: "250.0"
+ ) > 0
+ end
+
+ test "metric replay exists after check", context do
+ external_id = context.tenant.external_id
+ metric = "realtime_tenants_replay_count"
+ metric_value = metric_value(metric, tenant: external_id) || 0
+
+ assert {:ok, _, _} = Realtime.Messages.replay(context.db_conn, external_id, "test", 0, 1)
- bucket_pattern =
- ~r/realtime_tenants_write_authorization_check_bucket{tenant="#{context.tenant.external_id}",le="250"}\s(?\d+)/
+ # Wait enough time for the poll rate to be triggered at least once
+ Process.sleep(200)
- assert metric_value(bucket_pattern) > 0
+ assert metric_value(metric, tenant: external_id) == metric_value + 1
+
+ assert metric_value("realtime_tenants_replay_bucket", tenant: external_id, le: "250.0") > 0
end
test "metric realtime_tenants_broadcast_from_database_latency_committed_at exists after check", context do
- pattern =
- ~r/realtime_tenants_broadcast_from_database_latency_committed_at_count{tenant="#{context.tenant.external_id}"}\s(?\d+)/
+ external_id = context.tenant.external_id
+ metric = "realtime_tenants_broadcast_from_database_latency_committed_at_count"
+ metric_value = metric_value(metric, tenant: external_id) || 0
- metric_value = metric_value(pattern)
FakeUserCounter.fake_broadcast_from_database(context.tenant.external_id)
Process.sleep(200)
- assert metric_value(pattern) == metric_value + 1
+ assert metric_value(metric, tenant: external_id) == metric_value + 1
- bucket_pattern =
- ~r/realtime_tenants_broadcast_from_database_latency_committed_at_bucket{tenant="#{context.tenant.external_id}",le="10"}\s(?\d+)/
-
- assert metric_value(bucket_pattern) > 0
+ assert metric_value("realtime_tenants_broadcast_from_database_latency_committed_at_bucket",
+ tenant: external_id,
+ le: "10.0"
+ ) > 0
end
test "metric realtime_tenants_broadcast_from_database_latency_inserted_at exists after check", context do
- pattern =
- ~r/realtime_tenants_broadcast_from_database_latency_inserted_at_count{tenant="#{context.tenant.external_id}"}\s(?\d+)/
-
- metric_value = metric_value(pattern)
+ external_id = context.tenant.external_id
+ metric = "realtime_tenants_broadcast_from_database_latency_inserted_at_count"
+ metric_value = metric_value(metric, tenant: external_id) || 0
FakeUserCounter.fake_broadcast_from_database(context.tenant.external_id)
Process.sleep(200)
- assert metric_value(pattern) == metric_value + 1
-
- bucket_pattern =
- ~r/realtime_tenants_broadcast_from_database_latency_inserted_at_bucket{tenant="#{context.tenant.external_id}",le="5"}\s(?\d+)/
+ assert metric_value(metric, tenant: external_id) == metric_value + 1
- assert metric_value(bucket_pattern) > 0
+ assert metric_value("realtime_tenants_broadcast_from_database_latency_inserted_at_bucket",
+ tenant: external_id,
+ le: "10.0"
+ ) > 0
end
test "tenant metric payload size", context do
external_id = context.tenant.external_id
-
- pattern =
- ~r/realtime_tenants_payload_size_count{tenant="#{external_id}"}\s(?\d+)/
-
- metric_value = metric_value(pattern)
+ metric = "realtime_tenants_payload_size_count"
+ metric_value = metric_value(metric, message_type: "presence", tenant: external_id) || 0
message = %{topic: "a topic", event: "an event", payload: ["a", %{"b" => "c"}, 1, 23]}
- RealtimeWeb.TenantBroadcaster.pubsub_broadcast(external_id, "a topic", message, Phoenix.PubSub)
+ RealtimeWeb.TenantBroadcaster.pubsub_broadcast(external_id, "a topic", message, Phoenix.PubSub, :presence)
Process.sleep(200)
- assert metric_value(pattern) == metric_value + 1
-
- bucket_pattern =
- ~r/realtime_tenants_payload_size_bucket{tenant="#{external_id}",le="100"}\s(?\d+)/
+ assert metric_value(metric, message_type: "presence", tenant: external_id) == metric_value + 1
- assert metric_value(bucket_pattern) > 0
+ assert metric_value("realtime_tenants_payload_size_bucket", tenant: external_id, le: "250") > 0
end
test "global metric payload size", context do
external_id = context.tenant.external_id
- pattern = ~r/realtime_payload_size_count\s(?\d+)/
-
- metric_value = metric_value(pattern)
+ metric = "realtime_payload_size_count"
+ metric_value = metric_value(metric, message_type: "broadcast") || 0
message = %{topic: "a topic", event: "an event", payload: ["a", %{"b" => "c"}, 1, 23]}
- RealtimeWeb.TenantBroadcaster.pubsub_broadcast(external_id, "a topic", message, Phoenix.PubSub)
+ RealtimeWeb.TenantBroadcaster.pubsub_broadcast(external_id, "a topic", message, Phoenix.PubSub, :broadcast)
+
+ Process.sleep(200)
+ assert metric_value(metric, message_type: "broadcast") == metric_value + 1
+
+ assert metric_value("realtime_payload_size_bucket", le: "250.0") > 0
+ end
+
+ test "channel input bytes", context do
+ external_id = context.tenant.external_id
+
+ FakeUserCounter.fake_input_bytes(external_id)
+ FakeUserCounter.fake_input_bytes(external_id)
Process.sleep(200)
- assert metric_value(pattern) == metric_value + 1
+ assert metric_value("realtime_channel_input_bytes", tenant: external_id) == 20
+ end
- bucket_pattern = ~r/realtime_payload_size_bucket{le="100"}\s(?\d+)/
+ test "channel output bytes", context do
+ external_id = context.tenant.external_id
+
+ FakeUserCounter.fake_output_bytes(external_id)
+ FakeUserCounter.fake_output_bytes(external_id)
- assert metric_value(bucket_pattern) > 0
+ Process.sleep(200)
+ assert metric_value("realtime_channel_output_bytes", tenant: external_id) == 20
end
end
- defp metric_value(pattern) do
- PromEx.get_metrics(MetricsTest)
- |> String.split("\n", trim: true)
- |> Enum.find_value(
- "0",
- fn item ->
- case Regex.run(pattern, item, capture: ["number"]) do
- [number] -> number
- _ -> false
- end
- end
- )
- |> String.to_integer()
+ defp metric_value(metric, expected_tags \\ nil) do
+ MetricsHelper.search(PromEx.get_metrics(MetricsTest), metric, expected_tags)
end
end
diff --git a/test/realtime/monitoring/prom_ex/plugins/tenants_test.exs b/test/realtime/monitoring/prom_ex/plugins/tenants_test.exs
index 080fd3cfb..4ebd99388 100644
--- a/test/realtime/monitoring/prom_ex/plugins/tenants_test.exs
+++ b/test/realtime/monitoring/prom_ex/plugins/tenants_test.exs
@@ -20,118 +20,107 @@ defmodule Realtime.PromEx.Plugins.TenantsTest do
def exception, do: raise(RuntimeError)
end
- setup do
- local_tenant = Containers.checkout_tenant(run_migrations: true)
+ setup_all do
start_supervised!(MetricsTest)
- {:ok, %{tenant: local_tenant}}
+ :ok
end
describe "event_metrics erpc" do
- test "success" do
- pattern = ~r/realtime_rpc_count{mechanism=\"erpc\",success="true",tenant="123"}\s(?\d+)/
+ setup do
+ %{tenant: random_string()}
+ end
+
+ test "global success", %{tenant: tenant} do
+ metric = "realtime_global_rpc_count"
# Enough time for the poll rate to be triggered at least once
Process.sleep(200)
- previous_value = metric_value(pattern)
- assert {:ok, "success"} = Rpc.enhanced_call(node(), Test, :success, [], tenant_id: "123")
+ previous_value = metric_value(metric, mechanism: "erpc", success: true) || 0
+ assert {:ok, "success"} = Rpc.enhanced_call(node(), Test, :success, [], tenant_id: tenant)
Process.sleep(200)
- assert metric_value(pattern) == previous_value + 1
+ assert metric_value(metric, mechanism: "erpc", success: true) == previous_value + 1
end
- test "failure" do
- pattern = ~r/realtime_rpc_count{mechanism=\"erpc\",success="false",tenant="123"}\s(?\d+)/
+ test "global failure", %{tenant: tenant} do
+ metric = "realtime_global_rpc_count"
# Enough time for the poll rate to be triggered at least once
Process.sleep(200)
- previous_value = metric_value(pattern)
- assert {:error, "failure"} = Rpc.enhanced_call(node(), Test, :failure, [], tenant_id: "123")
+ previous_value = metric_value(metric, mechanism: "erpc", success: false) || 0
+ assert {:error, "failure"} = Rpc.enhanced_call(node(), Test, :failure, [], tenant_id: tenant)
Process.sleep(200)
- assert metric_value(pattern) == previous_value + 1
+ assert metric_value(metric, mechanism: "erpc", success: false) == previous_value + 1
end
- test "exception" do
- pattern = ~r/realtime_rpc_count{mechanism=\"erpc\",success="false",tenant="123"}\s(?\d+)/
+ test "global exception", %{tenant: tenant} do
+ metric = "realtime_global_rpc_count"
# Enough time for the poll rate to be triggered at least once
Process.sleep(200)
- previous_value = metric_value(pattern)
+ previous_value = metric_value(metric, mechanism: "erpc", success: false) || 0
assert {:error, :rpc_error, %RuntimeError{message: "runtime error"}} =
- Rpc.enhanced_call(node(), Test, :exception, [], tenant_id: "123")
+ Rpc.enhanced_call(node(), Test, :exception, [], tenant_id: tenant)
Process.sleep(200)
- assert metric_value(pattern) == previous_value + 1
+ assert metric_value(metric, mechanism: "erpc", success: false) == previous_value + 1
end
end
- test "event_metrics rpc" do
- pattern = ~r/realtime_rpc_count{mechanism=\"rpc\",success="",tenant="123"}\s(?\d+)/
- # Enough time for the poll rate to be triggered at least once
- Process.sleep(200)
- previous_value = metric_value(pattern)
- assert {:ok, "success"} = Rpc.call(node(), Test, :success, [], tenant_id: "123")
- Process.sleep(200)
- assert metric_value(pattern) == previous_value + 1
- end
-
describe "event_metrics gen_rpc" do
- test "success" do
- pattern = ~r/realtime_rpc_count{mechanism=\"gen_rpc\",success="true",tenant="123"}\s(?\d+)/
+ setup do
+ %{tenant: random_string()}
+ end
+
+ test "global success", %{tenant: tenant} do
+ metric = "realtime_global_rpc_count"
# Enough time for the poll rate to be triggered at least once
Process.sleep(200)
- previous_value = metric_value(pattern)
- assert GenRpc.multicall(Test, :success, [], tenant_id: "123") == [{node(), {:ok, "success"}}]
+ previous_value = metric_value(metric, mechanism: "gen_rpc", success: true) || 0
+ assert GenRpc.multicall(Test, :success, [], tenant_id: tenant) == [{node(), {:ok, "success"}}]
Process.sleep(200)
- assert metric_value(pattern) == previous_value + 1
+ assert metric_value(metric, mechanism: "gen_rpc", success: true) == previous_value + 1
end
- test "failure" do
- pattern = ~r/realtime_rpc_count{mechanism=\"gen_rpc\",success="false",tenant="123"}\s(?\d+)/
+ test "global failure", %{tenant: tenant} do
+ metric = "realtime_global_rpc_count"
# Enough time for the poll rate to be triggered at least once
Process.sleep(200)
- previous_value = metric_value(pattern)
- assert GenRpc.multicall(Test, :failure, [], tenant_id: "123") == [{node(), {:error, "failure"}}]
+ previous_value = metric_value(metric, mechanism: "gen_rpc", success: false) || 0
+ assert GenRpc.multicall(Test, :failure, [], tenant_id: tenant) == [{node(), {:error, "failure"}}]
Process.sleep(200)
- assert metric_value(pattern) == previous_value + 1
+ assert metric_value(metric, mechanism: "gen_rpc", success: false) == previous_value + 1
end
- test "exception" do
- pattern = ~r/realtime_rpc_count{mechanism=\"gen_rpc\",success="false",tenant="123"}\s(?\d+)/
+ test "global exception", %{tenant: tenant} do
+ metric = "realtime_global_rpc_count"
# Enough time for the poll rate to be triggered at least once
Process.sleep(200)
- previous_value = metric_value(pattern)
-
+ previous_value = metric_value(metric, mechanism: "gen_rpc", success: false) || 0
node = node()
assert assert [{^node, {:error, :rpc_error, {:EXIT, {%RuntimeError{message: "runtime error"}, _stacktrace}}}}] =
- GenRpc.multicall(Test, :exception, [], tenant_id: "123")
+ GenRpc.multicall(Test, :exception, [], tenant_id: tenant)
Process.sleep(200)
- assert metric_value(pattern) == previous_value + 1
+ assert metric_value(metric, mechanism: "gen_rpc", success: false) == previous_value + 1
end
end
describe "pooling metrics" do
+ setup do
+ local_tenant = Containers.checkout_tenant(run_migrations: true)
+ {:ok, %{tenant: local_tenant}}
+ end
+
test "conneted based on Connect module information for local node only", %{tenant: tenant} do
- pattern = ~r/realtime_tenants_connected\s(?\d+)/
# Enough time for the poll rate to be triggered at least once
Process.sleep(200)
- previous_value = metric_value(pattern)
+ previous_value = metric_value("realtime_tenants_connected")
{:ok, _} = Connect.lookup_or_start_connection(tenant.external_id)
Process.sleep(200)
- assert metric_value(pattern) == previous_value + 1
+ assert metric_value("realtime_tenants_connected") == previous_value + 1
end
end
- defp metric_value(pattern) do
- PromEx.get_metrics(MetricsTest)
- |> String.split("\n", trim: true)
- |> Enum.find_value(
- "0",
- fn item ->
- case Regex.run(pattern, item, capture: ["number"]) do
- [number] -> number
- _ -> false
- end
- end
- )
- |> String.to_integer()
+ defp metric_value(metric, expected_tags \\ nil) do
+ MetricsHelper.search(PromEx.get_metrics(MetricsTest), metric, expected_tags)
end
end
diff --git a/test/realtime/monitoring/prom_ex_test.exs b/test/realtime/monitoring/prom_ex_test.exs
index 849536543..a466e5efd 100644
--- a/test/realtime/monitoring/prom_ex_test.exs
+++ b/test/realtime/monitoring/prom_ex_test.exs
@@ -5,7 +5,7 @@ defmodule Realtime.PromExTest do
describe "get_metrics/0" do
test "builds metrics in prometheus format which includes host region and id" do
- metrics = PromEx.get_metrics()
+ metrics = PromEx.get_metrics() |> IO.iodata_to_binary()
assert String.contains?(
metrics,
@@ -16,27 +16,7 @@ defmodule Realtime.PromExTest do
assert String.contains?(
metrics,
- "beam_system_schedulers_online_info{host=\"nohost\",region=\"us-east-1\",id=\"nohost\"}"
- )
- end
- end
-
- describe "get_compressed_metrics/0" do
- test "builds metrics compressed using zlib" do
- compressed_metrics = PromEx.get_compressed_metrics()
-
- metrics = :zlib.uncompress(compressed_metrics)
-
- assert String.contains?(
- metrics,
- "# HELP beam_system_schedulers_online_info The number of scheduler threads that are online."
- )
-
- assert String.contains?(metrics, "# TYPE beam_system_schedulers_online_info gauge")
-
- assert String.contains?(
- metrics,
- "beam_system_schedulers_online_info{host=\"nohost\",region=\"us-east-1\",id=\"nohost\"}"
+ "beam_system_schedulers_online_info{host=\"nohost\",id=\"nohost\",region=\"us-east-1\"}"
)
end
end
diff --git a/test/realtime/monitoring/prometheus_test.exs b/test/realtime/monitoring/prometheus_test.exs
new file mode 100644
index 000000000..ca7563ce0
--- /dev/null
+++ b/test/realtime/monitoring/prometheus_test.exs
@@ -0,0 +1,434 @@
+# Based on https://github.com/rkallos/peep/blob/708546ed069aebdf78ac1f581130332bd2e8b5b1/test/prometheus_test.exs
+defmodule Realtime.Monitoring.PrometheusTest do
+ use ExUnit.Case, async: true
+
+ alias Realtime.Monitoring.Prometheus
+ alias Telemetry.Metrics
+
+ defmodule StorageCounter do
+ @moduledoc false
+ use Agent
+
+ def start() do
+ Agent.start(fn -> 0 end, name: __MODULE__)
+ end
+
+ def fresh_id() do
+ Agent.get_and_update(__MODULE__, fn i -> {:"#{i}", i + 1} end)
+ end
+ end
+
+ # Test struct that doesn't implement String.Chars
+ defmodule TestError do
+ defstruct [:reason, :code]
+ end
+
+ setup_all do
+ StorageCounter.start()
+ :ok
+ end
+
+ @impls [:default, {:default, 4}, :striped]
+
+ for impl <- @impls do
+ test "#{inspect(impl)} - counter formatting" do
+ counter = Metrics.counter("prometheus.test.counter", description: "a counter")
+ name = StorageCounter.fresh_id()
+
+ opts = [
+ name: name,
+ metrics: [counter],
+ storage: unquote(impl)
+ ]
+
+ {:ok, _pid} = Peep.start_link(opts)
+
+ Peep.insert_metric(name, counter, 1, %{foo: :bar, baz: "quux"})
+
+ expected = [
+ "# HELP prometheus_test_counter a counter",
+ "# TYPE prometheus_test_counter counter",
+ ~s(prometheus_test_counter{baz="quux",foo="bar"} 1)
+ ]
+
+ assert export(name) == lines_to_string(expected)
+ end
+
+ describe "#{inspect(impl)} - sum" do
+ test "sum formatting" do
+ name = StorageCounter.fresh_id()
+ sum = Metrics.sum("prometheus.test.sum", description: "a sum")
+
+ opts = [
+ name: name,
+ metrics: [sum],
+ storage: unquote(impl)
+ ]
+
+ {:ok, _pid} = Peep.start_link(opts)
+
+ Peep.insert_metric(name, sum, 5, %{foo: :bar, baz: "quux"})
+ Peep.insert_metric(name, sum, 3, %{foo: :bar, baz: "quux"})
+
+ expected = [
+ "# HELP prometheus_test_sum a sum",
+ "# TYPE prometheus_test_sum counter",
+ ~s(prometheus_test_sum{baz="quux",foo="bar"} 8)
+ ]
+
+ assert export(name) == lines_to_string(expected)
+ end
+
+ test "custom type" do
+ name = StorageCounter.fresh_id()
+
+ sum =
+ Metrics.sum("prometheus.test.sum",
+ description: "a sum",
+ reporter_options: [prometheus_type: "gauge"]
+ )
+
+ opts = [
+ name: name,
+ metrics: [sum],
+ storage: unquote(impl)
+ ]
+
+ {:ok, _pid} = Peep.start_link(opts)
+
+ Peep.insert_metric(name, sum, 5, %{foo: :bar, baz: "quux"})
+ Peep.insert_metric(name, sum, 3, %{foo: :bar, baz: "quux"})
+
+ expected = [
+ "# HELP prometheus_test_sum a sum",
+ "# TYPE prometheus_test_sum gauge",
+ ~s(prometheus_test_sum{baz="quux",foo="bar"} 8)
+ ]
+
+ assert export(name) == lines_to_string(expected)
+ end
+ end
+
+ describe "#{inspect(impl)} - last_value" do
+ test "formatting" do
+ name = StorageCounter.fresh_id()
+ last_value = Metrics.last_value("prometheus.test.gauge", description: "a last_value")
+
+ opts = [
+ name: name,
+ metrics: [last_value],
+ storage: unquote(impl)
+ ]
+
+ {:ok, _pid} = Peep.start_link(opts)
+
+ Peep.insert_metric(name, last_value, 5, %{blee: :bloo, flee: "floo"})
+
+ expected = [
+ "# HELP prometheus_test_gauge a last_value",
+ "# TYPE prometheus_test_gauge gauge",
+ ~s(prometheus_test_gauge{blee="bloo",flee="floo"} 5)
+ ]
+
+ assert export(name) == lines_to_string(expected)
+ end
+
+ test "custom type" do
+ name = StorageCounter.fresh_id()
+
+ last_value =
+ Metrics.last_value("prometheus.test.gauge",
+ description: "a last_value",
+ reporter_options: [prometheus_type: :sum]
+ )
+
+ opts = [
+ name: name,
+ metrics: [last_value],
+ storage: unquote(impl)
+ ]
+
+ {:ok, _pid} = Peep.start_link(opts)
+
+ Peep.insert_metric(name, last_value, 5, %{blee: :bloo, flee: "floo"})
+
+ expected = [
+ "# HELP prometheus_test_gauge a last_value",
+ "# TYPE prometheus_test_gauge sum",
+ ~s(prometheus_test_gauge{blee="bloo",flee="floo"} 5)
+ ]
+
+ assert export(name) == lines_to_string(expected)
+ end
+ end
+
+ test "#{inspect(impl)} - dist formatting" do
+ name = StorageCounter.fresh_id()
+
+ dist =
+ Metrics.distribution("prometheus.test.distribution",
+ description: "a distribution",
+ reporter_options: [max_value: 1000]
+ )
+
+ opts = [
+ name: name,
+ metrics: [dist],
+ storage: unquote(impl)
+ ]
+
+ {:ok, _pid} = Peep.start_link(opts)
+
+ expected = []
+ assert export(name) == lines_to_string(expected)
+
+ Peep.insert_metric(name, dist, 1, %{glee: :gloo})
+
+ expected = [
+ "# HELP prometheus_test_distribution a distribution",
+ "# TYPE prometheus_test_distribution histogram",
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.222222"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.493827"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.825789"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="2.23152"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="2.727413"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="3.333505"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="4.074283"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="4.97968"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="6.086275"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="7.438781"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="9.091843"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="11.112253"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="13.581642"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="16.599785"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="20.288626"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="24.79721"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="30.307701"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="37.042745"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="45.274466"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="55.335459"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="67.632227"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="82.661611"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="101.030858"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="123.48216"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="150.92264"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="184.461004"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="225.452339"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="275.552858"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="336.786827"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="411.628344"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="503.101309"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="614.9016"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="751.5464"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="918.556711"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1122.680424"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="+Inf"} 1),
+ ~s(prometheus_test_distribution_sum{glee="gloo"} 1),
+ ~s(prometheus_test_distribution_count{glee="gloo"} 1)
+ ]
+
+ assert export(name) == lines_to_string(expected)
+
+ for i <- 2..2000 do
+ Peep.insert_metric(name, dist, i, %{glee: :gloo})
+ end
+
+ expected = [
+ "# HELP prometheus_test_distribution a distribution",
+ "# TYPE prometheus_test_distribution histogram",
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.222222"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.493827"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.825789"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="2.23152"} 2),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="2.727413"} 2),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="3.333505"} 3),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="4.074283"} 4),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="4.97968"} 4),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="6.086275"} 6),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="7.438781"} 7),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="9.091843"} 9),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="11.112253"} 11),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="13.581642"} 13),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="16.599785"} 16),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="20.288626"} 20),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="24.79721"} 24),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="30.307701"} 30),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="37.042745"} 37),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="45.274466"} 45),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="55.335459"} 55),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="67.632227"} 67),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="82.661611"} 82),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="101.030858"} 101),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="123.48216"} 123),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="150.92264"} 150),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="184.461004"} 184),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="225.452339"} 225),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="275.552858"} 275),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="336.786827"} 336),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="411.628344"} 411),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="503.101309"} 503),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="614.9016"} 614),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="751.5464"} 751),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="918.556711"} 918),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1122.680424"} 1122),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="+Inf"} 2000),
+ ~s(prometheus_test_distribution_sum{glee="gloo"} 2001000),
+ ~s(prometheus_test_distribution_count{glee="gloo"} 2000)
+ ]
+
+ assert export(name) == lines_to_string(expected)
+ end
+
+ test "#{inspect(impl)} - dist formatting pow10" do
+ name = StorageCounter.fresh_id()
+
+ dist =
+ Metrics.distribution("prometheus.test.distribution",
+ description: "a distribution",
+ reporter_options: [
+ max_value: 1000,
+ peep_bucket_calculator: Peep.Buckets.PowersOfTen
+ ]
+ )
+
+ opts = [
+ name: name,
+ metrics: [dist],
+ storage: unquote(impl)
+ ]
+
+ {:ok, _pid} = Peep.start_link(opts)
+
+ expected = []
+ assert export(name) == lines_to_string(expected)
+
+ Peep.insert_metric(name, dist, 1, %{glee: :gloo})
+
+ expected = [
+ "# HELP prometheus_test_distribution a distribution",
+ "# TYPE prometheus_test_distribution histogram",
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="10.0"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="100.0"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e3"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e4"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e5"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e6"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e7"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e8"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e9"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="+Inf"} 1),
+ ~s(prometheus_test_distribution_sum{glee="gloo"} 1),
+ ~s(prometheus_test_distribution_count{glee="gloo"} 1)
+ ]
+
+ assert export(name) == lines_to_string(expected)
+
+ f = fn ->
+ for i <- 1..2000 do
+ Peep.insert_metric(name, dist, i, %{glee: :gloo})
+ end
+ end
+
+ 1..20 |> Enum.map(fn _ -> Task.async(f) end) |> Task.await_many()
+
+ expected =
+ [
+ "# HELP prometheus_test_distribution a distribution",
+ "# TYPE prometheus_test_distribution histogram",
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="10.0"} 181),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="100.0"} 1981),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e3"} 19981),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e4"} 40001),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e5"} 40001),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e6"} 40001),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e7"} 40001),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e8"} 40001),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e9"} 40001),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="+Inf"} 40001),
+ ~s(prometheus_test_distribution_sum{glee="gloo"} 40020001),
+ ~s(prometheus_test_distribution_count{glee="gloo"} 40001)
+ ]
+
+ assert export(name) == lines_to_string(expected)
+ end
+
+ test "#{inspect(impl)} - regression: label escaping" do
+ name = StorageCounter.fresh_id()
+
+ counter =
+ Metrics.counter(
+ "prometheus.test.counter",
+ description: "a counter"
+ )
+
+ opts = [
+ name: name,
+ metrics: [counter],
+ storage: unquote(impl)
+ ]
+
+ {:ok, _pid} = Peep.start_link(opts)
+
+ Peep.insert_metric(name, counter, 1, %{atom: "\"string\""})
+ Peep.insert_metric(name, counter, 1, %{"\"string\"" => :atom})
+ Peep.insert_metric(name, counter, 1, %{"\"string\"" => "\"string\""})
+ Peep.insert_metric(name, counter, 1, %{"string" => "string\n"})
+
+ expected = [
+ "# HELP prometheus_test_counter a counter",
+ "# TYPE prometheus_test_counter counter",
+ ~s(prometheus_test_counter{atom="\\\"string\\\""} 1),
+ ~s(prometheus_test_counter{\"string\"="atom"} 1),
+ ~s(prometheus_test_counter{\"string\"="\\\"string\\\""} 1),
+ ~s(prometheus_test_counter{string="string\\n"} 1)
+ ]
+
+ assert export(name) == lines_to_string(expected)
+ end
+
+ test "#{inspect(impl)} - regression: handle structs without String.Chars" do
+ name = StorageCounter.fresh_id()
+
+ counter =
+ Metrics.counter(
+ "prometheus.test.counter",
+ description: "a counter"
+ )
+
+ opts = [
+ name: name,
+ metrics: [counter],
+ storage: unquote(impl)
+ ]
+
+ {:ok, _pid} = Peep.start_link(opts)
+
+ # Create a struct that doesn't implement String.Chars
+ error_struct = %TestError{reason: :tcp_closed, code: 1001}
+
+ Peep.insert_metric(name, counter, 1, %{error: error_struct})
+
+ result = export(name)
+
+ # Should not crash and should contain the inspected struct representation
+ assert result =~ "prometheus_test_counter"
+ assert result =~ "TestError"
+ assert result =~ "tcp_closed"
+ end
+ end
+
+ defp export(name) do
+ Peep.get_all_metrics(name)
+ |> Prometheus.export()
+ |> IO.iodata_to_binary()
+ end
+
+ defp lines_to_string(lines) do
+ lines
+ |> Enum.map(&[&1, ?\n])
+ |> Enum.concat(["# EOF\n"])
+ |> IO.iodata_to_binary()
+ end
+end
diff --git a/test/realtime/nodes_test.exs b/test/realtime/nodes_test.exs
index ba3b6be0e..b127ed605 100644
--- a/test/realtime/nodes_test.exs
+++ b/test/realtime/nodes_test.exs
@@ -4,6 +4,78 @@ defmodule Realtime.NodesTest do
alias Realtime.Nodes
alias Realtime.Tenants
+ defp spawn_fake_node(region, node) do
+ parent = self()
+
+ fun = fn ->
+ :syn.join(RegionNodes, region, self(), node: node)
+ send(parent, :joined)
+
+ receive do
+ :ok -> :ok
+ end
+ end
+
+ {:ok, _pid} = start_supervised({Task, fun}, id: {region, node})
+ assert_receive :joined
+ end
+
+ describe "all_node_regions/0" do
+ test "returns all regions with nodes" do
+ spawn_fake_node("us-east-1", :node_1)
+ spawn_fake_node("ap-2", :node_2)
+ spawn_fake_node("ap-2", :node_3)
+
+ assert Nodes.all_node_regions() |> Enum.sort() == ["ap-2", "us-east-1"]
+ end
+
+ test "with no other nodes, returns my region only" do
+ assert Nodes.all_node_regions() == ["us-east-1"]
+ end
+ end
+
+ describe "region_nodes/1" do
+ test "nil region returns empty list" do
+ assert Nodes.region_nodes(nil) == []
+ end
+
+ test "returns nodes from region" do
+ region = "ap-southeast-2"
+ spawn_fake_node(region, :node_1)
+ spawn_fake_node(region, :node_2)
+
+ spawn_fake_node("eu-west-2", :node_3)
+
+ assert Nodes.region_nodes(region) == [:node_1, :node_2]
+ assert Nodes.region_nodes("eu-west-2") == [:node_3]
+ end
+
+ test "on non-existing region, returns empty list" do
+ assert Nodes.region_nodes("non-existing-region") == []
+ end
+ end
+
+ describe "node_from_region/2" do
+ test "nil region returns error" do
+ assert {:error, :not_available} = Nodes.node_from_region(nil, :any_key)
+ end
+
+ test "empty region returns error" do
+ assert {:error, :not_available} = Nodes.node_from_region("empty-region", :any_key)
+ end
+
+ test "returns the same node given the same key" do
+ region = "ap-southeast-3"
+ spawn_fake_node(region, :node_1)
+ spawn_fake_node(region, :node_2)
+
+ spawn_fake_node("eu-west-3", :node_3)
+
+ assert {:ok, :node_2} = Nodes.node_from_region(region, :key1)
+ assert {:ok, :node_2} = Nodes.node_from_region(region, :key1)
+ end
+ end
+
describe "get_node_for_tenant/1" do
setup do
tenant = Containers.checkout_tenant()
@@ -16,10 +88,7 @@ defmodule Realtime.NodesTest do
reject(&:syn.members/2)
end
- test "on existing tenant id, returns the node for the region using syn", %{
- tenant: tenant,
- region: region
- } do
+ test "on existing tenant id, returns the node for the region using syn", %{tenant: tenant, region: region} do
expected_nodes = [:tenant@closest1, :tenant@closest2]
expect(:syn, :members, fn RegionNodes, ^region ->
@@ -39,7 +108,7 @@ defmodule Realtime.NodesTest do
assert region == expected_region
end
- test "on existing tenant id, and a single node for a given region, returns default", %{
+ test "on existing tenant id, and a single node for a given region, returns single node", %{
tenant: tenant,
region: region
} do
@@ -48,7 +117,7 @@ defmodule Realtime.NodesTest do
expected_region = Tenants.region(tenant)
- assert node == node()
+ assert node != node()
assert region == expected_region
end
diff --git a/test/realtime/postgres_decoder_test.exs b/test/realtime/postgres_decoder_test.exs
index 9516e5e9a..bd9a0c579 100644
--- a/test/realtime/postgres_decoder_test.exs
+++ b/test/realtime/postgres_decoder_test.exs
@@ -2,24 +2,23 @@ defmodule Realtime.PostgresDecoderTest do
use ExUnit.Case, async: true
alias Realtime.Adapters.Postgres.Decoder
- alias Decoder.Messages.{
- Begin,
- Commit,
- Origin,
- Relation,
- Relation.Column,
- Insert,
- Update,
- Delete,
- Truncate,
- Type
- }
+ alias Decoder.Messages.Begin
+ alias Decoder.Messages.Commit
+ alias Decoder.Messages.Insert
+ alias Decoder.Messages.Origin
+ alias Decoder.Messages.Relation
+ alias Decoder.Messages.Relation.Column
+ alias Decoder.Messages.Type
+ alias Decoder.Messages.Unsupported
test "decodes begin messages" do
{:ok, expected_dt_no_microseconds, 0} = DateTime.from_iso8601("2019-07-18T17:02:35Z")
expected_dt = DateTime.add(expected_dt_no_microseconds, 726_322, :microsecond)
- assert Decoder.decode_message(<<66, 0, 0, 0, 2, 167, 244, 168, 128, 0, 2, 48, 246, 88, 88, 213, 242, 0, 0, 2, 107>>) ==
+ assert Decoder.decode_message(
+ <<66, 0, 0, 0, 2, 167, 244, 168, 128, 0, 2, 48, 246, 88, 88, 213, 242, 0, 0, 2, 107>>,
+ %{}
+ ) ==
%Begin{commit_timestamp: expected_dt, final_lsn: {2, 2_817_828_992}, xid: 619}
end
@@ -28,7 +27,8 @@ defmodule Realtime.PostgresDecoderTest do
expected_dt = DateTime.add(expected_dt_no_microseconds, 726_322, :microsecond)
assert Decoder.decode_message(
- <<67, 0, 0, 0, 0, 2, 167, 244, 168, 128, 0, 0, 0, 2, 167, 244, 168, 176, 0, 2, 48, 246, 88, 88, 213, 242>>
+ <<67, 0, 0, 0, 0, 2, 167, 244, 168, 128, 0, 0, 0, 2, 167, 244, 168, 176, 0, 2, 48, 246, 88, 88, 213, 242>>,
+ %{}
) == %Commit{
flags: [],
lsn: {2, 2_817_828_992},
@@ -38,7 +38,7 @@ defmodule Realtime.PostgresDecoderTest do
end
test "decodes origin messages" do
- assert Decoder.decode_message(<<79, 0, 0, 0, 2, 167, 244, 168, 128>> <> "Elmer Fud") ==
+ assert Decoder.decode_message(<<79, 0, 0, 0, 2, 167, 244, 168, 128>> <> "Elmer Fud", %{}) ==
%Origin{
origin_commit_lsn: {2, 2_817_828_992},
name: "Elmer Fud"
@@ -48,7 +48,8 @@ defmodule Realtime.PostgresDecoderTest do
test "decodes relation messages" do
assert Decoder.decode_message(
<<82, 0, 0, 96, 0, 112, 117, 98, 108, 105, 99, 0, 102, 111, 111, 0, 100, 0, 2, 0, 98, 97, 114, 0, 0, 0, 0,
- 25, 255, 255, 255, 255, 1, 105, 100, 0, 0, 0, 0, 23, 255, 255, 255, 255>>
+ 25, 255, 255, 255, 255, 1, 105, 100, 0, 0, 0, 0, 23, 255, 255, 255, 255>>,
+ %{}
) == %Relation{
id: 24_576,
namespace: "public",
@@ -74,7 +75,8 @@ defmodule Realtime.PostgresDecoderTest do
test "decodes type messages" do
assert Decoder.decode_message(
<<89, 0, 0, 128, 52, 112, 117, 98, 108, 105, 99, 0, 101, 120, 97, 109, 112, 108, 101, 95, 116, 121, 112,
- 101, 0>>
+ 101, 0>>,
+ %{}
) ==
%Type{
id: 32_820,
@@ -83,110 +85,103 @@ defmodule Realtime.PostgresDecoderTest do
}
end
- describe "truncate messages" do
- test "decodes messages" do
- assert Decoder.decode_message(<<84, 0, 0, 0, 1, 0, 0, 0, 96, 0>>) ==
- %Truncate{
- number_of_relations: 1,
- options: [],
- truncated_relations: [24_576]
- }
- end
-
- test "decodes messages with cascade option" do
- assert Decoder.decode_message(<<84, 0, 0, 0, 1, 1, 0, 0, 96, 0>>) ==
- %Truncate{
- number_of_relations: 1,
- options: [:cascade],
- truncated_relations: [24_576]
- }
- end
-
- test "decodes messages with restart identity option" do
- assert Decoder.decode_message(<<84, 0, 0, 0, 1, 2, 0, 0, 96, 0>>) ==
- %Truncate{
- number_of_relations: 1,
- options: [:restart_identity],
- truncated_relations: [24_576]
- }
- end
- end
-
describe "data message (TupleData) decoder" do
- test "decodes insert messages" do
- assert Decoder.decode_message(
- <<73, 0, 0, 96, 0, 78, 0, 2, 116, 0, 0, 0, 3, 98, 97, 122, 116, 0, 0, 0, 3, 53, 54, 48>>
- ) == %Insert{
- relation_id: 24_576,
- tuple_data: {"baz", "560"}
- }
+ setup do
+ relation = %{
+ id: 24_576,
+ namespace: "public",
+ name: "foo",
+ columns: [
+ %Column{name: "id", type: "uuid"},
+ %Column{name: "bar", type: "text"}
+ ]
+ }
+
+ %{relation: relation}
end
- test "decodes insert messages with null values" do
- assert Decoder.decode_message(<<73, 0, 0, 96, 0, 78, 0, 2, 110, 116, 0, 0, 0, 3, 53, 54, 48>>) == %Insert{
- relation_id: 24_576,
- tuple_data: {nil, "560"}
- }
- end
+ test "decodes insert messages", %{relation: relation} do
+ uuid = UUID.uuid4()
+ string = Generators.random_string()
+
+ data =
+ "I" <>
+ <<relation.id::integer-32>> <>
+ "N" <>
+ <<2::integer-16>> <>
+ "b" <>
+ <<16::integer-32>> <>
+ UUID.string_to_binary!(uuid) <>
+ "b" <>
+ <<byte_size(string)::integer-32>> <>
+ string
- test "decodes insert messages with unchanged toasted values" do
- assert Decoder.decode_message(<<73, 0, 0, 96, 0, 78, 0, 2, 117, 116, 0, 0, 0, 3, 53, 54, 48>>) == %Insert{
- relation_id: 24_576,
- tuple_data: {:unchanged_toast, "560"}
- }
- end
-
- test "decodes update messages with default replica identity setting" do
assert Decoder.decode_message(
- <<85, 0, 0, 96, 0, 78, 0, 2, 116, 0, 0, 0, 7, 101, 120, 97, 109, 112, 108, 101, 116, 0, 0, 0, 3, 53, 54,
- 48>>
- ) == %Update{
- relation_id: 24_576,
- changed_key_tuple_data: nil,
- old_tuple_data: nil,
- tuple_data: {"example", "560"}
+ data,
+ %{relation.id => relation}
+ ) == %Insert{
+ relation_id: relation.id,
+ tuple_data: {uuid, string}
}
end
- test "decodes update messages with FULL replica identity setting" do
- assert Decoder.decode_message(
- <<85, 0, 0, 96, 0, 79, 0, 2, 116, 0, 0, 0, 3, 98, 97, 122, 116, 0, 0, 0, 3, 53, 54, 48, 78, 0, 2, 116, 0,
- 0, 0, 7, 101, 120, 97, 109, 112, 108, 101, 116, 0, 0, 0, 3, 53, 54, 48>>
- ) == %Update{
- relation_id: 24_576,
- changed_key_tuple_data: nil,
- old_tuple_data: {"baz", "560"},
- tuple_data: {"example", "560"}
- }
- end
+ test "ignores unknown relations", %{relation: relation} do
+ uuid = UUID.uuid4()
+ string = Generators.random_string()
+
+ data =
+ "I" <>
+ <<679::integer-32>> <>
+ "N" <>
+ <<2::integer-16>> <>
+ "b" <>
+ <<16::integer-32>> <>
+ UUID.string_to_binary!(uuid) <>
+ "b" <>
+ <<byte_size(string)::integer-32>> <>
+ string
- test "decodes update messages with USING INDEX replica identity setting" do
assert Decoder.decode_message(
- <<85, 0, 0, 96, 0, 75, 0, 2, 116, 0, 0, 0, 3, 98, 97, 122, 110, 78, 0, 2, 116, 0, 0, 0, 7, 101, 120, 97,
- 109, 112, 108, 101, 116, 0, 0, 0, 3, 53, 54, 48>>
- ) == %Update{
- relation_id: 24_576,
- changed_key_tuple_data: {"baz", nil},
- old_tuple_data: nil,
- tuple_data: {"example", "560"}
- }
+ data,
+ %{relation.id => relation}
+ ) == %Unsupported{}
end
- test "decodes DELETE messages with USING INDEX replica identity setting" do
- assert Decoder.decode_message(
- <<68, 0, 0, 96, 0, 75, 0, 2, 116, 0, 0, 0, 7, 101, 120, 97, 109, 112, 108, 101, 110>>
- ) == %Delete{
- relation_id: 24_576,
- changed_key_tuple_data: {"example", nil}
+ test "decodes insert messages with null values", %{relation: relation} do
+ string = Generators.random_string()
+
+ data =
+ "I" <>
+ <<relation.id::integer-32>> <>
+ "N" <>
+ <<2::integer-16>> <>
+ "n" <>
+ "b" <>
+ <<byte_size(string)::integer-32>> <>
+ string
+
+ assert Decoder.decode_message(data, %{relation.id => relation}) == %Insert{
+ relation_id: relation.id,
+ tuple_data: {nil, string}
}
end
- test "decodes DELETE messages with FULL replica identity setting" do
- assert Decoder.decode_message(
- <<68, 0, 0, 96, 0, 79, 0, 2, 116, 0, 0, 0, 3, 98, 97, 122, 116, 0, 0, 0, 3, 53, 54, 48>>
- ) == %Delete{
- relation_id: 24_576,
- old_tuple_data: {"baz", "560"}
+ test "decodes insert messages with unchanged toasted values", %{relation: relation} do
+ string = Generators.random_string()
+
+ data =
+ "I" <>
+ <<relation.id::integer-32>> <>
+ "N" <>
+ <<2::integer-16>> <>
+ "u" <>
+ "b" <>
+ <<byte_size(string)::integer-32>> <>
+ string
+
+ assert Decoder.decode_message(data, %{relation.id => relation}) == %Insert{
+ relation_id: relation.id,
+ tuple_data: {:unchanged_toast, string}
}
end
end
diff --git a/test/realtime/rate_counter/rate_counter_test.exs b/test/realtime/rate_counter/rate_counter_test.exs
index 6d3f57401..1c3d8af07 100644
--- a/test/realtime/rate_counter/rate_counter_test.exs
+++ b/test/realtime/rate_counter/rate_counter_test.exs
@@ -22,7 +22,7 @@ defmodule Realtime.RateCounterTest do
max_bucket_len: 60,
tick: 1000,
tick_ref: _,
- idle_shutdown: 900_000,
+ idle_shutdown: 600_000,
idle_shutdown_ref: _,
telemetry: %{emit: false},
limit: %{log: false}
@@ -62,7 +62,7 @@ defmodule Realtime.RateCounterTest do
max_bucket_len: 60,
tick: 10,
tick_ref: _,
- idle_shutdown: 900_000,
+ idle_shutdown: 600_000,
idle_shutdown_ref: _,
telemetry: %{
emit: true,
@@ -197,7 +197,7 @@ defmodule Realtime.RateCounterTest do
id: id,
opts: [
tick: 100,
- max_bucket_len: 3,
+ max_bucket_len: 5,
limit: [
value: 49,
measurement: :sum,
@@ -215,7 +215,7 @@ defmodule Realtime.RateCounterTest do
avg: +0.0,
sum: 0,
bucket: _,
- max_bucket_len: 3,
+ max_bucket_len: 5,
telemetry: %{emit: false},
limit: %{
log: true,
@@ -228,7 +228,7 @@ defmodule Realtime.RateCounterTest do
log =
capture_log(fn ->
GenCounter.add(args.id, 100)
- Process.sleep(100)
+ Process.sleep(120)
end)
assert {:ok, %RateCounter{sum: sum, limit: %{triggered: true}}} = RateCounter.get(args)
@@ -239,7 +239,7 @@ defmodule Realtime.RateCounterTest do
# Splitting by the error message returns the error message and the rest of the log only
assert length(String.split(log, "ErrorMessage: Reason")) == 2
- Process.sleep(400)
+ Process.sleep(600)
assert {:ok, %RateCounter{sum: 0, limit: %{triggered: false}}} = RateCounter.get(args)
end
@@ -260,10 +260,10 @@ defmodule Realtime.RateCounterTest do
test "rate counters shut themselves down when no activity occurs on the GenCounter" do
args = %Args{id: {:domain, :metric, Ecto.UUID.generate()}}
- {:ok, pid} = RateCounter.new(args, idle_shutdown: 5)
+ {:ok, pid} = RateCounter.new(args, idle_shutdown: 100)
Process.monitor(pid)
- assert_receive {:DOWN, _ref, :process, ^pid, :normal}, 25
+ assert_receive {:DOWN, _ref, :process, ^pid, :normal}, 200
# Cache has not expired yet
assert {:ok, %RateCounter{}} = Cachex.get(RateCounter, args.id)
Process.sleep(2000)
@@ -316,37 +316,5 @@ defmodule Realtime.RateCounterTest do
end
end
- describe "stop/1" do
- test "stops rate counters for a given entity" do
- entity_id = Ecto.UUID.generate()
- fake_terms = Enum.map(1..10, fn _ -> {:domain, :"metric_#{random_string()}", Ecto.UUID.generate()} end)
- terms = Enum.map(1..10, fn _ -> {:domain, :"metric_#{random_string()}", entity_id} end)
-
- for term <- terms do
- args = %Args{id: term}
- {:ok, _} = RateCounter.new(args)
- assert {:ok, %RateCounter{}} = RateCounter.get(args)
- end
-
- for term <- fake_terms do
- args = %Args{id: term}
- {:ok, _} = RateCounter.new(args)
- assert {:ok, %RateCounter{}} = RateCounter.get(args)
- end
-
- assert :ok = RateCounter.stop(entity_id)
- # Wait for processes to shut down and Registry to update
- Process.sleep(100)
-
- for term <- terms do
- assert [] = Registry.lookup(Realtime.Registry.Unique, {RateCounter, :rate_counter, term})
- end
-
- for term <- fake_terms do
- assert [{_pid, _value}] = Registry.lookup(Realtime.Registry.Unique, {RateCounter, :rate_counter, term})
- end
- end
- end
-
def handle_telemetry(event, measures, metadata, pid: pid), do: send(pid, {event, measures, metadata})
end
diff --git a/test/realtime/repo_replica_test.exs b/test/realtime/repo_replica_test.exs
index a3734d31b..0b988205b 100644
--- a/test/realtime/repo_replica_test.exs
+++ b/test/realtime/repo_replica_test.exs
@@ -1,14 +1,17 @@
defmodule Realtime.Repo.ReplicaTest do
- use ExUnit.Case
+ # Application environment is modified by these tests, so they must not run async
+ use ExUnit.Case, async: false
alias Realtime.Repo.Replica
setup do
previous_platform = Application.get_env(:realtime, :platform)
previous_region = Application.get_env(:realtime, :region)
+ previous_master_region = Application.get_env(:realtime, :master_region)
on_exit(fn ->
Application.put_env(:realtime, :platform, previous_platform)
Application.put_env(:realtime, :region, previous_region)
+ Application.put_env(:realtime, :master_region, previous_master_region)
end)
end
@@ -16,12 +19,20 @@ defmodule Realtime.Repo.ReplicaTest do
for {region, mod} <- Replica.replicas_aws() do
setup do
Application.put_env(:realtime, :platform, :aws)
+ Application.put_env(:realtime, :master_region, "special-region")
+ :ok
end
test "handles #{region} region" do
Application.put_env(:realtime, :region, unquote(region))
replica_asserts(unquote(mod), Replica.replica())
end
+
+ test "defaults to Realtime.Repo if region is equal to master region on #{region}" do
+ Application.put_env(:realtime, :region, unquote(region))
+ Application.put_env(:realtime, :master_region, unquote(region))
+ replica_asserts(Realtime.Repo, Replica.replica())
+ end
end
test "defaults to Realtime.Repo if region is not configured" do
@@ -34,6 +45,8 @@ defmodule Realtime.Repo.ReplicaTest do
for {region, mod} <- Replica.replicas_fly() do
setup do
Application.put_env(:realtime, :platform, :fly)
+ Application.put_env(:realtime, :master_region, "special-region")
+ :ok
end
test "handles #{region} region" do
diff --git a/test/realtime/rpc_test.exs b/test/realtime/rpc_test.exs
index 221cd781b..9c83d7064 100644
--- a/test/realtime/rpc_test.exs
+++ b/test/realtime/rpc_test.exs
@@ -81,8 +81,7 @@ defmodule Realtime.RpcTest do
func: :test_success,
origin_node: ^origin_node,
target_node: ^node,
- success: true,
- tenant: "123"
+ success: true
}}
end
@@ -100,8 +99,7 @@ defmodule Realtime.RpcTest do
func: :test_raise,
origin_node: ^origin_node,
target_node: ^node,
- success: false,
- tenant: "123"
+ success: false
}}
end
diff --git a/test/realtime/syn_handler_test.exs b/test/realtime/syn_handler_test.exs
index 2b27cf322..96a2e316a 100644
--- a/test/realtime/syn_handler_test.exs
+++ b/test/realtime/syn_handler_test.exs
@@ -13,8 +13,15 @@ defmodule Realtime.SynHandlerTest do
defmodule FakeConnect do
use GenServer
+ def start_link([tenant_id, region, opts]) do
+ name = {Connect, tenant_id, %{conn: nil, region: region}}
+ gen_opts = [name: {:via, :syn, name}]
+ GenServer.start_link(FakeConnect, [tenant_id, opts], gen_opts)
+ end
+
def init([tenant_id, opts]) do
- :syn.update_registry(Connect, tenant_id, fn _pid, meta -> %{meta | conn: "fake_conn"} end)
+ conn = Keyword.get(opts, :conn, "remote_conn")
+ :syn.update_registry(Connect, tenant_id, fn _pid, meta -> %{meta | conn: conn} end)
if opts[:trap_exit], do: Process.flag(:trap_exit, true)
@@ -28,125 +35,184 @@ defmodule Realtime.SynHandlerTest do
Code.eval_quoted(@aux_mod)
- defp assert_process_down(pid, reason \\ nil, timeout \\ 100) do
- ref = Process.monitor(pid)
+ # iex(1)> :"main@127.0.0.11" < :"atest@127.0.0.1"
+ # false
+ # iex(2)> :erlang.phash2("tenant123", 2)
+ # 0
+ # iex(3)> :erlang.phash2("tenant999", 2)
+ # 1
+ describe "integration test with a Connect conflict name=atest" do
+ setup do
+ {:ok, pid, node} =
+ Clustered.start_disconnected(@aux_mod, name: :atest, extra_config: [{:realtime, :region, "ap-southeast-2"}])
- if reason do
- assert_receive {:DOWN, ^ref, :process, ^pid, ^reason}, timeout
- else
- assert_receive {:DOWN, ^ref, :process, ^pid, _reason}, timeout
+ %{peer_pid: pid, node: node}
+ end
+
+ @tag tenant_id: "tenant999"
+ test "tenant hash = 1", %{node: node, peer_pid: peer_pid, tenant_id: tenant_id} do
+ assert :erlang.phash2(tenant_id, 2) == 1
+ local_pid = start_supervised!({FakeConnect, [tenant_id, "us-east-1", [conn: "local_conn"]]})
+ {:ok, remote_pid} = :peer.call(peer_pid, FakeConnect, :start_link, [[tenant_id, "ap-southeast-2", []]])
+ on_exit(fn -> Process.exit(remote_pid, :brutal_kill) end)
+
+ log =
+ capture_log(fn ->
+ # Connect to peer node to cause a conflict on syn
+ true = Node.connect(node)
+ # Give some time for the conflict resolution to happen on the other node
+ Process.sleep(500)
+
+ # Both nodes agree
+ assert {^remote_pid, %{region: "ap-southeast-2", conn: "remote_conn"}} =
+ :peer.call(peer_pid, :syn, :lookup, [Connect, tenant_id])
+
+ assert {^remote_pid, %{region: "ap-southeast-2", conn: "remote_conn"}} = :syn.lookup(Connect, tenant_id)
+
+ assert :peer.call(peer_pid, Process, :alive?, [remote_pid])
+
+ refute Process.alive?(local_pid)
+ end)
+
+ assert log =~ "stop local process: #{inspect(local_pid)}"
+ assert log =~ "Successfully stopped #{inspect(local_pid)}"
+
+ assert log =~
+ "Elixir.Realtime.Tenants.Connect terminated due to syn conflict resolution: \"#{tenant_id}\" #{inspect(local_pid)}"
+ end
+
+ @tag tenant_id: "tenant123"
+ test "tenant hash = 0", %{node: node, peer_pid: peer_pid, tenant_id: tenant_id} do
+ assert :erlang.phash2(tenant_id, 2) == 0
+ {:ok, remote_pid} = :peer.call(peer_pid, FakeConnect, :start_link, [[tenant_id, "ap-southeast-2", []]])
+ local_pid = start_supervised!({FakeConnect, [tenant_id, "us-east-1", [conn: "local_conn"]]})
+ on_exit(fn -> Process.exit(remote_pid, :kill) end)
+
+ log =
+ capture_log(fn ->
+ # Connect to peer node to cause a conflict on syn
+ true = Node.connect(node)
+ # Give some time for the conflict resolution to happen on the other node
+ Process.sleep(500)
+
+ # Both nodes agree
+ assert {^local_pid, %{region: "us-east-1", conn: "local_conn"}} = :syn.lookup(Connect, tenant_id)
+
+ assert {^local_pid, %{region: "us-east-1", conn: "local_conn"}} =
+ :peer.call(peer_pid, :syn, :lookup, [Connect, tenant_id])
+
+ refute :peer.call(peer_pid, Process, :alive?, [remote_pid])
+
+ assert Process.alive?(local_pid)
+ end)
+
+ assert log =~ "remote process will be stopped: #{inspect(remote_pid)}"
end
end
- describe "integration test with a Connect conflict" do
+ # iex(1)> :"main@127.0.0.11" < :"test@127.0.0.1"
+ # true
+ # iex(2)> :erlang.phash2("tenant123", 2)
+ # 0
+ # iex(3)> :erlang.phash2("tenant999", 2)
+ # 1
+ describe "integration test with a Connect conflict name=test" do
setup do
- ensure_connect_down("dev_tenant")
- {:ok, pid, node} = Clustered.start_disconnected(@aux_mod, extra_config: [{:realtime, :region, "ap-southeast-2"}])
- Endpoint.subscribe("connect:dev_tenant")
+ {:ok, pid, node} =
+ Clustered.start_disconnected(@aux_mod, name: :test, extra_config: [{:realtime, :region, "ap-southeast-2"}])
+
%{peer_pid: pid, node: node}
end
- test "local node started first", %{node: node, peer_pid: peer_pid} do
- external_id = "dev_tenant"
- # start connect locally first
- {:ok, db_conn} = Connect.lookup_or_start_connection(external_id)
- assert Connect.ready?(external_id)
- connect = Connect.whereis(external_id)
- assert node(connect) == node()
-
- # Now let's force the remote node to start the fake Connect process
- name = {Connect, external_id, %{conn: nil, region: "ap-southeast-2"}}
- opts = [name: {:via, :syn, name}]
- {:ok, remote_pid} = :peer.call(peer_pid, GenServer, :start_link, [FakeConnect, [external_id, []], opts])
+ @tag tenant_id: "tenant999"
+ test "tenant hash = 1", %{node: node, peer_pid: peer_pid, tenant_id: tenant_id} do
+ assert :erlang.phash2(tenant_id, 2) == 1
+ Endpoint.subscribe("connect:#{tenant_id}")
+ local_pid = start_supervised!({FakeConnect, [tenant_id, "us-east-1", [conn: "local_conn"]]})
+
+ {:ok, remote_pid} = :peer.call(peer_pid, FakeConnect, :start_link, [[tenant_id, "ap-southeast-2", []]])
+
on_exit(fn -> Process.exit(remote_pid, :brutal_kill) end)
log =
capture_log(fn ->
- Endpoint.subscribe("connect:dev_tenant")
# Connect to peer node to cause a conflict on syn
true = Node.connect(node)
# Give some time for the conflict resolution to happen on the other node
Process.sleep(500)
# Both nodes agree
- assert {^connect, %{region: "us-east-1", conn: ^db_conn}} = :syn.lookup(Connect, external_id)
+ assert {^local_pid, %{region: "us-east-1", conn: "local_conn"}} = :syn.lookup(Connect, tenant_id)
- assert {^connect, %{region: "us-east-1", conn: ^db_conn}} =
- :peer.call(peer_pid, :syn, :lookup, [Connect, external_id])
+ assert {^local_pid, %{region: "us-east-1", conn: "local_conn"}} =
+ :peer.call(peer_pid, :syn, :lookup, [Connect, tenant_id])
refute :peer.call(peer_pid, Process, :alive?, [remote_pid])
- assert Process.alive?(connect)
+ assert Process.alive?(local_pid)
end)
assert log =~ "remote process will be stopped: #{inspect(remote_pid)}"
end
- test "remote node started first", %{node: node, peer_pid: peer_pid} do
- external_id = "dev_tenant"
+ @tag tenant_id: "tenant123"
+ test "tenant hash = 0", %{node: node, peer_pid: peer_pid, tenant_id: tenant_id} do
+ assert :erlang.phash2(tenant_id, 2) == 0
# Start remote process first
- name = {Connect, external_id, %{conn: nil, region: "ap-southeast-2"}}
- opts = [name: {:via, :syn, name}]
- {:ok, remote_pid} = :peer.call(peer_pid, GenServer, :start_link, [FakeConnect, [external_id, []], opts])
+ {:ok, remote_pid} = :peer.call(peer_pid, FakeConnect, :start_link, [[tenant_id, "ap-southeast-2", []]])
+
on_exit(fn -> Process.exit(remote_pid, :kill) end)
# start connect locally later
- {:ok, _db_conn} = Connect.lookup_or_start_connection(external_id)
- assert Connect.ready?(external_id)
- connect = Connect.whereis(external_id)
- assert node(connect) == node()
+ local_pid = start_supervised!({FakeConnect, [tenant_id, "us-east-1", [conn: "local_conn"]]})
log =
capture_log(fn ->
# Connect to peer node to cause a conflict on syn
true = Node.connect(node)
- assert_process_down(connect)
- assert_receive %{event: "connect_down"}
+ # Give some time for the conflict resolution to happen on the other node
+ Process.sleep(500)
# Both nodes agree
- assert {^remote_pid, %{region: "ap-southeast-2", conn: "fake_conn"}} =
- :peer.call(peer_pid, :syn, :lookup, [Connect, external_id])
+ assert {^remote_pid, %{region: "ap-southeast-2", conn: "remote_conn"}} =
+ :peer.call(peer_pid, :syn, :lookup, [Connect, tenant_id])
- assert {^remote_pid, %{region: "ap-southeast-2", conn: "fake_conn"}} = :syn.lookup(Connect, external_id)
+ assert {^remote_pid, %{region: "ap-southeast-2", conn: "remote_conn"}} = :syn.lookup(Connect, tenant_id)
assert :peer.call(peer_pid, Process, :alive?, [remote_pid])
- refute Process.alive?(connect)
+ refute Process.alive?(local_pid)
end)
- assert log =~ "stop local process: #{inspect(connect)}"
- assert log =~ "Successfully stopped #{inspect(connect)}"
+ assert log =~ "stop local process: #{inspect(local_pid)}"
+ assert log =~ "Successfully stopped #{inspect(local_pid)}"
assert log =~
- "Elixir.Realtime.Tenants.Connect terminated due to syn conflict resolution: \"dev_tenant\" #{inspect(connect)}"
+ "Elixir.Realtime.Tenants.Connect terminated due to syn conflict resolution: \"#{tenant_id}\" #{inspect(local_pid)}"
end
- test "remote node started first but timed out stopping", %{node: node, peer_pid: peer_pid} do
- external_id = "dev_tenant"
+ @tag tenant_id: "tenant123"
+ test "tenant hash = 0 but timed out stopping", %{node: node, peer_pid: peer_pid, tenant_id: tenant_id} do
+ assert :erlang.phash2(tenant_id, 2) == 0
# Start remote process first
- name = {Connect, external_id, %{conn: nil, region: "ap-southeast-2"}}
- opts = [name: {:via, :syn, name}]
- {:ok, remote_pid} = :peer.call(peer_pid, GenServer, :start_link, [FakeConnect, [external_id, []], opts])
- on_exit(fn -> Process.exit(remote_pid, :brutal_kill) end)
+ {:ok, remote_pid} = :peer.call(peer_pid, FakeConnect, :start_link, [[tenant_id, "ap-southeast-2", []]])
- {:ok, local_pid} =
- start_supervised(%{
- id: self(),
- start: {GenServer, :start_link, [FakeConnect, [external_id, [trap_exit: true]], opts]}
- })
+ on_exit(fn -> Process.exit(remote_pid, :kill) end)
+
+ # start connect locally later
+ local_pid = start_supervised!({FakeConnect, [tenant_id, "us-east-1", [conn: "local_conn", trap_exit: true]]})
log =
capture_log(fn ->
# Connect to peer node to cause a conflict on syn
true = Node.connect(node)
assert_process_down(local_pid, :killed, 6000)
- assert_receive %{event: "connect_down"}
# Both nodes agree
- assert {^remote_pid, %{region: "ap-southeast-2", conn: "fake_conn"}} =
- :peer.call(peer_pid, :syn, :lookup, [Connect, external_id])
+ assert {^remote_pid, %{region: "ap-southeast-2", conn: "remote_conn"}} =
+ :peer.call(peer_pid, :syn, :lookup, [Connect, tenant_id])
- assert {^remote_pid, %{region: "ap-southeast-2", conn: "fake_conn"}} = :syn.lookup(Connect, external_id)
+ assert {^remote_pid, %{region: "ap-southeast-2", conn: "remote_conn"}} = :syn.lookup(Connect, tenant_id)
assert :peer.call(peer_pid, Process, :alive?, [remote_pid])
@@ -157,7 +223,7 @@ defmodule Realtime.SynHandlerTest do
assert log =~ "Timed out while waiting for process #{inspect(local_pid)} to stop. Sending kill exit signal"
assert log =~
- "Elixir.Realtime.Tenants.Connect terminated due to syn conflict resolution: \"dev_tenant\" #{inspect(local_pid)}"
+ "Elixir.Realtime.Tenants.Connect terminated due to syn conflict resolution: \"#{tenant_id}\" #{inspect(local_pid)}"
end
end
@@ -168,32 +234,50 @@ defmodule Realtime.SynHandlerTest do
test "it handles :syn_conflict_resolution reason" do
reason = :syn_conflict_resolution
+ pid = self()
log =
capture_log(fn ->
- assert SynHandler.on_process_unregistered(@mod, @name, self(), %{}, reason) == :ok
+ assert SynHandler.on_process_unregistered(@mod, @name, pid, %{}, reason) == :ok
end)
topic = "#{@topic}:#{@name}"
event = "#{@topic}_down"
assert log =~ "#{@mod} terminated due to syn conflict resolution: #{inspect(@name)} #{inspect(self())}"
- assert_receive %Phoenix.Socket.Broadcast{topic: ^topic, event: ^event, payload: nil}
+ assert_receive %Phoenix.Socket.Broadcast{topic: ^topic, event: ^event, payload: %{reason: ^reason, pid: ^pid}}
end
test "it handles other reasons" do
reason = :other_reason
+ pid = self()
log =
capture_log(fn ->
- assert SynHandler.on_process_unregistered(@mod, @name, self(), %{}, reason) == :ok
+ assert SynHandler.on_process_unregistered(@mod, @name, pid, %{}, reason) == :ok
end)
topic = "#{@topic}:#{@name}"
event = "#{@topic}_down"
refute log =~ "#{@mod} terminated: #{inspect(@name)} #{node()}"
- assert_receive %Phoenix.Socket.Broadcast{topic: ^topic, event: ^event, payload: nil}, 500
+
+ assert_receive %Phoenix.Socket.Broadcast{
+ topic: ^topic,
+ event: ^event,
+ payload: %{reason: ^reason, pid: ^pid}
+ },
+ 500
+ end
+ end
+
+ defp assert_process_down(pid, reason, timeout) do
+ ref = Process.monitor(pid)
+
+ if reason do
+ assert_receive {:DOWN, ^ref, :process, ^pid, ^reason}, timeout
+ else
+ assert_receive {:DOWN, ^ref, :process, ^pid, _reason}, timeout
end
end
end
diff --git a/test/realtime/tenants/authorization_remote_test.exs b/test/realtime/tenants/authorization_remote_test.exs
index 53efe44ec..e21148fd1 100644
--- a/test/realtime/tenants/authorization_remote_test.exs
+++ b/test/realtime/tenants/authorization_remote_test.exs
@@ -100,8 +100,9 @@ defmodule Realtime.Tenants.AuthorizationRemoteTest do
Authorization.get_read_authorizations(%Policies{}, pid, context.authorization_context)
end
- # Waiting for RateCounter to limit
- Process.sleep(1100)
+ # Force RateCounter to tick
+ rate_counter = Realtime.Tenants.authorization_errors_per_second_rate(context.tenant)
+ RateCounterHelper.tick!(rate_counter)
for _ <- 1..10 do
{:error, :increase_connection_pool} =
@@ -127,8 +128,9 @@ defmodule Realtime.Tenants.AuthorizationRemoteTest do
Authorization.get_write_authorizations(%Policies{}, pid, context.authorization_context)
end
- # Waiting for RateCounter to limit
- Process.sleep(1100)
+ # Force RateCounter to tick
+ rate_counter = Realtime.Tenants.authorization_errors_per_second_rate(context.tenant)
+ RateCounterHelper.tick!(rate_counter)
for _ <- 1..10 do
{:error, :increase_connection_pool} =
@@ -184,8 +186,9 @@ defmodule Realtime.Tenants.AuthorizationRemoteTest do
end)
Task.await_many([t1, t2], 20_000)
- # Wait for RateCounter to log
- Process.sleep(1000)
+ # Force RateCounter to tick and log error
+ rate_counter = Realtime.Tenants.authorization_errors_per_second_rate(context.tenant)
+ RateCounterHelper.tick!(rate_counter)
end)
external_id = context.tenant.external_id
@@ -241,7 +244,7 @@ defmodule Realtime.Tenants.AuthorizationRemoteTest do
Connect.shutdown("dev_tenant")
# Waiting for :syn to unregister
Process.sleep(100)
- Realtime.RateCounter.stop("dev_tenant")
+ RateCounterHelper.stop("dev_tenant")
{:ok, local_db_conn} = Database.connect(tenant, "realtime_test", :stop)
topic = random_string()
diff --git a/test/realtime/tenants/authorization_test.exs b/test/realtime/tenants/authorization_test.exs
index 724e6e933..10c9c0e09 100644
--- a/test/realtime/tenants/authorization_test.exs
+++ b/test/realtime/tenants/authorization_test.exs
@@ -8,7 +8,7 @@ defmodule Realtime.Tenants.AuthorizationTest do
alias Realtime.Api.Message
alias Realtime.Database
- alias Realtime.Repo
+ alias Realtime.Tenants.Repo
alias Realtime.Tenants.Authorization
alias Realtime.Tenants.Authorization.Policies
alias Realtime.Tenants.Authorization.Policies.BroadcastPolicies
@@ -105,8 +105,9 @@ defmodule Realtime.Tenants.AuthorizationTest do
Authorization.get_read_authorizations(%Policies{}, pid, context.authorization_context)
end
- # Waiting for RateCounter to limit
- Process.sleep(1100)
+ # Force RateCounter to tick
+ rate_counter = Realtime.Tenants.authorization_errors_per_second_rate(context.tenant)
+ RateCounterHelper.tick!(rate_counter)
# The next auth requests will not call the database due to being rate limited
reject(&Database.transaction/4)
@@ -118,9 +119,8 @@ defmodule Realtime.Tenants.AuthorizationTest do
assert log =~ "IncreaseConnectionPool: Too many database timeouts"
- # Only one log message should be emitted
- # Splitting by the error message returns the error message and the rest of the log only
- assert length(String.split(log, "IncreaseConnectionPool: Too many database timeouts")) == 2
+ # Only one or two log messages should be emitted
+ assert length(String.split(log, "IncreaseConnectionPool: Too many database timeouts")) <= 3
end
@tag role: "anon", policies: []
@@ -135,8 +135,9 @@ defmodule Realtime.Tenants.AuthorizationTest do
Authorization.get_write_authorizations(%Policies{}, pid, context.authorization_context)
end
- # Waiting for RateCounter to limit
- Process.sleep(1100)
+ # Force RateCounter to tick
+ rate_counter = Realtime.Tenants.authorization_errors_per_second_rate(context.tenant)
+ RateCounterHelper.tick!(rate_counter)
# The next auth requests will not call the database due to being rate limited
reject(&Database.transaction/4)
@@ -192,8 +193,9 @@ defmodule Realtime.Tenants.AuthorizationTest do
end)
Task.await_many([t1, t2], 20_000)
- # Wait for RateCounter log
- Process.sleep(1000)
+ # Force RateCounter to tick and log error
+ rate_counter = Realtime.Tenants.authorization_errors_per_second_rate(context.tenant)
+ RateCounterHelper.tick!(rate_counter)
end)
external_id = context.tenant.external_id
@@ -280,7 +282,7 @@ defmodule Realtime.Tenants.AuthorizationTest do
def rls_context(context) do
tenant = Containers.checkout_tenant(run_migrations: true)
# Warm cache to avoid Cachex and Ecto.Sandbox ownership issues
- Cachex.put!(Realtime.Tenants.Cache, {{:get_tenant_by_external_id, 1}, [tenant.external_id]}, {:cached, tenant})
+ Realtime.Tenants.Cache.update_cache(tenant)
{:ok, db_conn} = Database.connect(tenant, "realtime_test", :stop)
topic = context[:topic] || random_string()
@@ -318,9 +320,9 @@ defmodule Realtime.Tenants.AuthorizationTest do
extensions = [Map.from_struct(%{extension | :settings => settings})]
- {:ok, tenant} = Realtime.Api.update_tenant(tenant, %{extensions: extensions})
+ {:ok, tenant} = Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{extensions: extensions})
# Warm cache to avoid Cachex and Ecto.Sandbox ownership issues
- Cachex.put!(Realtime.Tenants.Cache, {{:get_tenant_by_external_id, 1}, [tenant.external_id]}, {:cached, tenant})
+ Realtime.Tenants.Cache.update_cache(tenant)
end
end
diff --git a/test/realtime/tenants/batch_broadcast_test.exs b/test/realtime/tenants/batch_broadcast_test.exs
new file mode 100644
index 000000000..f5fa42764
--- /dev/null
+++ b/test/realtime/tenants/batch_broadcast_test.exs
@@ -0,0 +1,529 @@
+defmodule Realtime.Tenants.BatchBroadcastTest do
+ use RealtimeWeb.ConnCase, async: true
+ use Mimic
+
+ alias Realtime.Database
+ alias Realtime.GenCounter
+ alias Realtime.RateCounter
+ alias Realtime.Tenants
+ alias Realtime.Tenants.BatchBroadcast
+ alias Realtime.Tenants.Authorization
+ alias Realtime.Tenants.Authorization.Policies
+ alias Realtime.Tenants.Authorization.Policies.BroadcastPolicies
+ alias Realtime.Tenants.Connect
+
+ alias RealtimeWeb.TenantBroadcaster
+
+ setup do
+ tenant = Containers.checkout_tenant(run_migrations: true)
+ Realtime.Tenants.Cache.update_cache(tenant)
+ {:ok, tenant: tenant}
+ end
+
+ describe "public message broadcasting" do
+ test "broadcasts multiple public messages successfully", %{tenant: tenant} do
+ broadcast_events_key = Tenants.events_per_second_key(tenant)
+ topic1 = random_string()
+ topic2 = random_string()
+
+ messages = %{
+ messages: [
+ %{topic: topic1, payload: %{"data" => "test1"}, event: "event1"},
+ %{topic: topic2, payload: %{"data" => "test2"}, event: "event2"},
+ %{topic: topic1, payload: %{"data" => "test3"}, event: "event3"}
+ ]
+ }
+
+ expect(GenCounter, :add, 3, fn ^broadcast_events_key -> :ok end)
+ expect(TenantBroadcaster, :pubsub_broadcast, 3, fn _, _, _, _, _ -> :ok end)
+
+ assert :ok = BatchBroadcast.broadcast(nil, tenant, messages, false)
+ end
+
+ test "public messages do not have private prefix in topic", %{tenant: tenant} do
+ broadcast_events_key = Tenants.events_per_second_key(tenant)
+ topic = random_string()
+
+ messages = %{
+ messages: [%{topic: topic, payload: %{"data" => "test"}, event: "event1"}]
+ }
+
+ expect(GenCounter, :add, fn ^broadcast_events_key -> :ok end)
+
+ expect(TenantBroadcaster, :pubsub_broadcast, fn _, topic, _, _, _ ->
+ refute String.contains?(topic, "-private")
+ end)
+
+ assert :ok = BatchBroadcast.broadcast(nil, tenant, messages, false)
+ end
+ end
+
+ describe "message ID metadata" do
+ test "includes message ID in metadata when provided", %{tenant: tenant} do
+ broadcast_events_key = Tenants.events_per_second_key(tenant)
+ topic = random_string()
+
+ messages = %{
+ messages: [%{id: "msg-123", topic: topic, payload: %{"data" => "test"}, event: "event1"}]
+ }
+
+ expect(GenCounter, :add, fn ^broadcast_events_key -> :ok end)
+
+ expect(TenantBroadcaster, :pubsub_broadcast, fn _, _, broadcast, _, _ ->
+ assert %Phoenix.Socket.Broadcast{
+ payload: %{
+ "payload" => %{"data" => "test"},
+ "event" => "event1",
+ "type" => "broadcast",
+ "meta" => %{"id" => "msg-123"}
+ }
+ } = broadcast
+ end)
+
+ assert :ok = BatchBroadcast.broadcast(nil, tenant, messages, false)
+ end
+ end
+
+ describe "super user broadcasting" do
+ test "bypasses authorization for private messages with super_user flag", %{tenant: tenant} do
+ broadcast_events_key = Tenants.events_per_second_key(tenant)
+ topic1 = random_string()
+ topic2 = random_string()
+
+ messages = %{
+ messages: [
+ %{topic: topic1, payload: %{"data" => "test1"}, event: "event1", private: true},
+ %{topic: topic2, payload: %{"data" => "test2"}, event: "event2", private: true}
+ ]
+ }
+
+ expect(GenCounter, :add, 2, fn ^broadcast_events_key -> :ok end)
+ expect(TenantBroadcaster, :pubsub_broadcast, 2, fn _, _, _, _, _ -> :ok end)
+
+ assert :ok = BatchBroadcast.broadcast(nil, tenant, messages, true)
+ end
+
+ test "private messages have private prefix in topic", %{tenant: tenant} do
+ broadcast_events_key = Tenants.events_per_second_key(tenant)
+ topic = random_string()
+
+ messages = %{
+ messages: [%{topic: topic, payload: %{"data" => "test"}, event: "event1", private: true}]
+ }
+
+ expect(GenCounter, :add, fn ^broadcast_events_key -> :ok end)
+
+ expect(TenantBroadcaster, :pubsub_broadcast, fn _, topic, _, _, _ ->
+ assert String.contains?(topic, "-private")
+ end)
+
+ assert :ok = BatchBroadcast.broadcast(nil, tenant, messages, true)
+ end
+ end
+
+ describe "private message authorization" do
+ test "broadcasts private messages with valid authorization", %{tenant: tenant} do
+ topic = random_string()
+ sub = random_string()
+ role = "authenticated"
+
+ auth_params = %{
+ tenant_id: tenant.external_id,
+ topic: topic,
+ headers: [{"header-1", "value-1"}],
+ claims: %{"sub" => sub, "role" => role, "exp" => Joken.current_time() + 1_000},
+ role: role,
+ sub: sub
+ }
+
+ messages = %{messages: [%{topic: topic, payload: %{"data" => "test"}, event: "event1", private: true}]}
+
+ broadcast_events_key = Tenants.events_per_second_key(tenant)
+
+ expect(GenCounter, :add, 1, fn ^broadcast_events_key -> :ok end)
+
+ Authorization
+ |> expect(:build_authorization_params, fn params -> params end)
+ |> expect(:get_write_authorizations, fn _, _ -> {:ok, %Policies{broadcast: %BroadcastPolicies{write: true}}} end)
+
+ expect(TenantBroadcaster, :pubsub_broadcast, 1, fn _, _, _, _, _ -> :ok end)
+
+ assert :ok = BatchBroadcast.broadcast(auth_params, tenant, messages, false)
+ end
+
+ test "skips private messages without authorization", %{tenant: tenant} do
+ topic = random_string()
+ sub = random_string()
+ role = "anon"
+
+ auth_params = %{
+ tenant_id: tenant.external_id,
+ topic: topic,
+ headers: [{"header-1", "value-1"}],
+ claims: %{"sub" => sub, "role" => role, "exp" => Joken.current_time() + 1_000},
+ role: role,
+ sub: sub
+ }
+
+ Authorization
+ |> expect(:build_authorization_params, 1, fn params -> params end)
+ |> expect(:get_write_authorizations, 1, fn _, _ ->
+ {:ok, %Policies{broadcast: %BroadcastPolicies{write: false}}}
+ end)
+
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+
+ messages = %{
+ messages: [%{topic: topic, payload: %{"data" => "test"}, event: "event1", private: true}]
+ }
+
+ assert :ok = BatchBroadcast.broadcast(auth_params, tenant, messages, false)
+
+ assert calls(&TenantBroadcaster.pubsub_broadcast/5) == []
+ end
+
+ test "broadcasts only authorized topics in mixed authorization batch", %{tenant: tenant} do
+ topic = random_string()
+ sub = random_string()
+ role = "authenticated"
+
+ auth_params = %{
+ tenant_id: tenant.external_id,
+ headers: [{"header-1", "value-1"}],
+ claims: %{"sub" => sub, "role" => role, "exp" => Joken.current_time() + 1_000},
+ role: role,
+ sub: sub
+ }
+
+ messages = %{
+ messages: [
+ %{topic: topic, payload: %{"data" => "test1"}, event: "event1", private: true},
+ %{topic: random_string(), payload: %{"data" => "test2"}, event: "event2", private: true}
+ ]
+ }
+
+ broadcast_events_key = Tenants.events_per_second_key(tenant)
+
+ expect(GenCounter, :add, fn ^broadcast_events_key -> :ok end)
+
+ Authorization
+ |> expect(:build_authorization_params, 2, fn params -> params end)
+ |> expect(:get_write_authorizations, 2, fn
+ _, %{topic: ^topic} -> %Policies{broadcast: %BroadcastPolicies{write: true}}
+ _, _ -> %Policies{broadcast: %BroadcastPolicies{write: false}}
+ end)
+
+ # Only one topic will actually be broadcast
+ expect(TenantBroadcaster, :pubsub_broadcast, 1, fn _, _, %Phoenix.Socket.Broadcast{topic: ^topic}, _, _ ->
+ :ok
+ end)
+
+ assert :ok = BatchBroadcast.broadcast(auth_params, tenant, messages, false)
+ end
+
+ test "groups messages by topic and checks authorization once per topic", %{tenant: tenant} do
+ topic_1 = random_string()
+ topic_2 = random_string()
+ sub = random_string()
+ role = "authenticated"
+
+ auth_params = %{
+ tenant_id: tenant.external_id,
+ headers: [{"header-1", "value-1"}],
+ claims: %{"sub" => sub, "role" => role, "exp" => Joken.current_time() + 1_000},
+ role: role,
+ sub: sub
+ }
+
+ messages = %{
+ messages: [
+ %{topic: topic_1, payload: %{"data" => "test1"}, event: "event1", private: true},
+ %{topic: topic_2, payload: %{"data" => "test2"}, event: "event2", private: true},
+ %{topic: topic_1, payload: %{"data" => "test3"}, event: "event3", private: true}
+ ]
+ }
+
+ broadcast_events_key = Tenants.events_per_second_key(tenant)
+
+ expect(GenCounter, :add, 3, fn ^broadcast_events_key -> :ok end)
+
+ Authorization
+ |> expect(:build_authorization_params, 2, fn params -> params end)
+ |> expect(:get_write_authorizations, 2, fn _, _ ->
+ {:ok, %Policies{broadcast: %BroadcastPolicies{write: true}}}
+ end)
+
+ expect(TenantBroadcaster, :pubsub_broadcast, 3, fn _, _, _, _, _ -> :ok end)
+
+ assert :ok = BatchBroadcast.broadcast(auth_params, tenant, messages, false)
+ end
+
+ test "handles missing auth params for private messages", %{tenant: tenant} do
+ events_per_second_rate = Tenants.events_per_second_rate(tenant)
+
+ RateCounter
+ |> stub(:new, fn _ -> {:ok, nil} end)
+ |> stub(:get, fn ^events_per_second_rate -> {:ok, %RateCounter{avg: 0}} end)
+
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+ reject(&Connect.lookup_or_start_connection/1)
+
+ messages = %{
+ messages: [%{topic: "topic1", payload: %{"data" => "test"}, event: "event1", private: true}]
+ }
+
+ assert :ok = BatchBroadcast.broadcast(nil, tenant, messages, false)
+
+ assert calls(&TenantBroadcaster.pubsub_broadcast/5) == []
+ end
+ end
+
+ describe "mixed public and private messages" do
+ setup %{tenant: tenant} do
+ {:ok, db_conn} = Database.connect(tenant, "realtime_test", :stop)
+ %{db_conn: db_conn}
+ end
+
+ test "broadcasts both public and private messages together", %{tenant: tenant, db_conn: db_conn} do
+ topic = random_string()
+ sub = random_string()
+ role = "authenticated"
+
+ create_rls_policies(db_conn, [:authenticated_write_broadcast], %{topic: topic})
+
+ auth_params = %{
+ tenant_id: tenant.external_id,
+ topic: topic,
+ headers: [{"header-1", "value-1"}],
+ claims: %{"sub" => sub, "role" => role, "exp" => Joken.current_time() + 1_000},
+ role: role,
+ sub: sub
+ }
+
+ events_per_second_rate = Tenants.events_per_second_rate(tenant)
+ broadcast_events_key = Tenants.events_per_second_key(tenant)
+
+ RateCounter
+ |> stub(:new, fn _ -> {:ok, nil} end)
+ |> stub(:get, fn
+ ^events_per_second_rate ->
+ {:ok, %RateCounter{avg: 0}}
+
+ _ ->
+ {:ok,
+ %RateCounter{
+ avg: 0,
+ limit: %{log: true, value: 10, measurement: :sum, triggered: false, log_fn: fn -> :ok end}
+ }}
+ end)
+
+ expect(GenCounter, :add, 3, fn ^broadcast_events_key -> :ok end)
+ expect(Connect, :lookup_or_start_connection, fn _ -> {:ok, db_conn} end)
+
+ Authorization
+ |> expect(:build_authorization_params, fn params -> params end)
+ |> expect(:get_write_authorizations, fn _, _ ->
+ {:ok, %Policies{broadcast: %BroadcastPolicies{write: true}}}
+ end)
+
+ expect(TenantBroadcaster, :pubsub_broadcast, 3, fn _, _, _, _, _ -> :ok end)
+
+ messages = %{
+ messages: [
+ %{topic: "public1", payload: %{"data" => "public"}, event: "event1", private: false},
+ %{topic: topic, payload: %{"data" => "private"}, event: "event2", private: true},
+ %{topic: "public2", payload: %{"data" => "public2"}, event: "event3"}
+ ]
+ }
+
+ assert :ok = BatchBroadcast.broadcast(auth_params, tenant, messages, false)
+
+ broadcast_calls = calls(&TenantBroadcaster.pubsub_broadcast/5)
+ assert length(broadcast_calls) == 3
+ end
+ end
+
+ describe "Plug.Conn integration" do
+ test "accepts and converts Plug.Conn to auth params", %{tenant: tenant} do
+ topic = random_string()
+ broadcast_events_key = Tenants.events_per_second_key(tenant)
+ messages = %{messages: [%{topic: topic, payload: %{"data" => "test"}, event: "event1"}]}
+
+ expect(GenCounter, :add, fn ^broadcast_events_key -> :ok end)
+ expect(TenantBroadcaster, :pubsub_broadcast, 1, fn _, _, _, _, _ -> :ok end)
+
+ conn =
+ build_conn()
+ |> Map.put(:assigns, %{
+ claims: %{"sub" => "user123", "role" => "authenticated"},
+ role: "authenticated",
+ sub: "user123"
+ })
+ |> Map.put(:req_headers, [{"authorization", "Bearer token"}])
+
+ assert :ok = BatchBroadcast.broadcast(conn, tenant, messages, false)
+ end
+ end
+
+ describe "message validation" do
+ test "returns changeset error when topic is missing", %{tenant: tenant} do
+ messages = %{messages: [%{payload: %{"data" => "test"}, event: "event1"}]}
+
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+
+ result = BatchBroadcast.broadcast(nil, tenant, messages, false)
+ assert {:error, %Ecto.Changeset{valid?: false}} = result
+ end
+
+ test "returns changeset error when payload is missing", %{tenant: tenant} do
+ topic = random_string()
+ messages = %{messages: [%{topic: topic, event: "event1"}]}
+
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+
+ result = BatchBroadcast.broadcast(nil, tenant, messages, false)
+ assert {:error, %Ecto.Changeset{valid?: false}} = result
+ end
+
+ test "returns changeset error when event is missing", %{tenant: tenant} do
+ topic = random_string()
+ messages = %{messages: [%{topic: topic, payload: %{"data" => "test"}}]}
+
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+ result = BatchBroadcast.broadcast(nil, tenant, messages, false)
+ assert {:error, %Ecto.Changeset{valid?: false}} = result
+ end
+
+ test "returns changeset error when messages array is empty", %{tenant: tenant} do
+ messages = %{messages: []}
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+ result = BatchBroadcast.broadcast(nil, tenant, messages, false)
+ assert {:error, %Ecto.Changeset{valid?: false}} = result
+ end
+ end
+
+ describe "rate limiting" do
+ test "rejects broadcast when rate limit is exceeded", %{tenant: tenant} do
+ events_per_second_rate = Tenants.events_per_second_rate(tenant)
+ topic = random_string()
+ messages = %{messages: [%{topic: topic, payload: %{"data" => "test"}, event: "event1"}]}
+
+ RateCounter
+ |> stub(:new, fn _ -> {:ok, nil} end)
+ |> stub(:get, fn ^events_per_second_rate -> {:ok, %RateCounter{avg: tenant.max_events_per_second + 1}} end)
+
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+
+ result = BatchBroadcast.broadcast(nil, tenant, messages, false)
+ assert {:error, :too_many_requests, "You have exceeded your rate limit"} = result
+ end
+
+ test "rejects broadcast when batch would exceed rate limit", %{tenant: tenant} do
+ events_per_second_rate = Tenants.events_per_second_rate(tenant)
+
+ messages = %{
+ messages:
+ Enum.map(1..10, fn _ ->
+ %{topic: random_string(), payload: %{"data" => "test"}, event: random_string()}
+ end)
+ }
+
+ RateCounter
+ |> stub(:new, fn _ -> {:ok, nil} end)
+ |> stub(:get, fn ^events_per_second_rate ->
+ {:ok, %RateCounter{avg: tenant.max_events_per_second - 5}}
+ end)
+
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+
+ result = BatchBroadcast.broadcast(nil, tenant, messages, false)
+
+ assert {:error, :too_many_requests, "Too many messages to broadcast, please reduce the batch size"} = result
+ end
+
+ test "allows broadcast at rate limit boundary", %{tenant: tenant} do
+ events_per_second_rate = Tenants.events_per_second_rate(tenant)
+ broadcast_events_key = Tenants.events_per_second_key(tenant)
+ current_rate = tenant.max_events_per_second - 2
+
+ messages = %{
+ messages: [
+ %{topic: random_string(), payload: %{"data" => "test1"}, event: "event1"},
+ %{topic: random_string(), payload: %{"data" => "test2"}, event: "event2"}
+ ]
+ }
+
+ RateCounter
+ |> stub(:new, fn _ -> {:ok, nil} end)
+ |> stub(:get, fn ^events_per_second_rate ->
+ {:ok, %RateCounter{avg: current_rate}}
+ end)
+
+ expect(GenCounter, :add, 2, fn ^broadcast_events_key -> :ok end)
+ expect(TenantBroadcaster, :pubsub_broadcast, 2, fn _, _, _, _, _ -> :ok end)
+
+ assert :ok = BatchBroadcast.broadcast(nil, tenant, messages, false)
+ end
+
+ test "rejects broadcast when payload size exceeds tenant limit", %{tenant: tenant} do
+ messages = %{
+ messages: [
+ %{
+ topic: random_string(),
+ payload: %{"data" => random_string(tenant.max_payload_size_in_kb * 1000 + 1)},
+ event: "event1"
+ }
+ ]
+ }
+
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+
+ result = BatchBroadcast.broadcast(nil, tenant, messages, false)
+
+ assert {:error,
+ %Ecto.Changeset{
+ valid?: false,
+ changes: %{messages: [%{errors: [payload: {"Payload size exceeds tenant limit", []}]}]}
+ }} = result
+ end
+ end
+
+ describe "error handling" do
+ test "returns error when tenant is nil" do
+ messages = %{messages: [%{topic: "topic1", payload: %{"data" => "test"}, event: "event1"}]}
+ assert {:error, :tenant_not_found} = BatchBroadcast.broadcast(nil, nil, messages, false)
+ end
+
+ test "gracefully handles database connection errors for private messages", %{tenant: tenant} do
+ topic = random_string()
+ sub = random_string()
+ role = "authenticated"
+
+ auth_params = %{
+ tenant_id: tenant.external_id,
+ headers: [{"header-1", "value-1"}],
+ claims: %{"sub" => sub, "role" => role, "exp" => Joken.current_time() + 1_000},
+ role: role,
+ sub: sub
+ }
+
+ events_per_second_rate = Tenants.events_per_second_rate(tenant)
+
+ RateCounter
+ |> stub(:new, fn _ -> {:ok, nil} end)
+ |> stub(:get, fn ^events_per_second_rate -> {:ok, %RateCounter{avg: 0}} end)
+
+ expect(Connect, :lookup_or_start_connection, fn _ -> {:error, :connection_failed} end)
+
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+
+ messages = %{
+ messages: [%{topic: topic, payload: %{"data" => "test"}, event: "event1", private: true}]
+ }
+
+ assert :ok = BatchBroadcast.broadcast(auth_params, tenant, messages, false)
+
+ assert calls(&TenantBroadcaster.pubsub_broadcast/5) == []
+ end
+ end
+end
diff --git a/test/realtime/tenants/cache_test.exs b/test/realtime/tenants/cache_test.exs
index 1889c94ef..46577b802 100644
--- a/test/realtime/tenants/cache_test.exs
+++ b/test/realtime/tenants/cache_test.exs
@@ -1,11 +1,11 @@
defmodule Realtime.Tenants.CacheTest do
- alias Realtime.Rpc
# async: false due to the usage of dev_realtime tenant
use Realtime.DataCase, async: false
alias Realtime.Api
- alias Realtime.Tenants.Cache
+ alias Realtime.Rpc
alias Realtime.Tenants
+ alias Realtime.Tenants.Cache
setup do
{:ok, tenant: tenant_fixture()}
@@ -15,10 +15,18 @@ defmodule Realtime.Tenants.CacheTest do
test "tenants cache returns a cached result", %{tenant: tenant} do
external_id = tenant.external_id
assert %Api.Tenant{name: "tenant"} = Cache.get_tenant_by_external_id(external_id)
- Api.update_tenant(tenant, %{name: "new name"})
+
+ changeset = Api.Tenant.changeset(tenant, %{name: "new name"})
+ Repo.update!(changeset)
assert %Api.Tenant{name: "new name"} = Tenants.get_tenant_by_external_id(external_id)
assert %Api.Tenant{name: "tenant"} = Cache.get_tenant_by_external_id(external_id)
end
+
+ test "does not cache when tenant is not found" do
+ assert Cache.get_tenant_by_external_id("not found") == nil
+
+ assert Cachex.exists?(Cache, {:get_tenant_by_external_id, "not found"}) == {:ok, false}
+ end
end
describe "invalidate_tenant_cache/1" do
@@ -38,6 +46,18 @@ defmodule Realtime.Tenants.CacheTest do
end
end
+ describe "update_cache/1" do
+ test "updates the cache given a tenant", %{tenant: tenant} do
+ external_id = tenant.external_id
+ assert %Api.Tenant{name: "tenant"} = Cache.get_tenant_by_external_id(external_id)
+ # Update a tenant
+ updated_tenant = %{tenant | name: "updated name"}
+ # Update cache
+ Cache.update_cache(updated_tenant)
+ assert %Api.Tenant{name: "updated name"} = Cache.get_tenant_by_external_id(external_id)
+ end
+ end
+
describe "distributed_invalidate_tenant_cache/1" do
setup do
{:ok, node} = Clustered.start()
@@ -51,25 +71,21 @@ defmodule Realtime.Tenants.CacheTest do
dummy_name = random_string()
# Ensure cache has the values
- Cachex.put!(
- Realtime.Tenants.Cache,
- {{:get_tenant_by_external_id, 1}, [external_id]},
- {:cached, %{tenant | name: dummy_name}}
- )
-
- Rpc.enhanced_call(node, Cachex, :put!, [
- Realtime.Tenants.Cache,
- {{:get_tenant_by_external_id, 1}, [external_id]},
- {:cached, %{tenant | name: dummy_name}}
- ])
+ Realtime.Tenants.Cache.update_cache(%{tenant | name: dummy_name})
+
+ Rpc.enhanced_call(node, Realtime.Tenants.Cache, :update_cache, [%{tenant | name: dummy_name}])
# Cache showing old value
- assert %Api.Tenant{name: ^dummy_name} = Cache.get_tenant_by_external_id(external_id)
- assert %Api.Tenant{name: ^dummy_name} = Rpc.enhanced_call(node, Cache, :get_tenant_by_external_id, [external_id])
+ assert {:ok, %Api.Tenant{name: ^dummy_name}} = Cachex.get(Cache, {:get_tenant_by_external_id, external_id})
+
+ assert {:ok, %Api.Tenant{name: ^dummy_name}} =
+ Rpc.enhanced_call(node, Cachex, :get, [Cache, {:get_tenant_by_external_id, external_id}])
# Invalidate cache
- assert true = Cache.distributed_invalidate_tenant_cache(external_id)
+ assert :ok = Cache.distributed_invalidate_tenant_cache(external_id)
+ # wait for cache to be invalidated in both nodes
+ Process.sleep(200)
# Cache showing new value
assert %Api.Tenant{name: ^expected_name} = Cache.get_tenant_by_external_id(external_id)
@@ -77,4 +93,39 @@ defmodule Realtime.Tenants.CacheTest do
Rpc.enhanced_call(node, Cache, :get_tenant_by_external_id, [external_id])
end
end
+
+ describe "global_cache_update/1" do
+ setup do
+ {:ok, node} = Clustered.start()
+ %{node: node}
+ end
+
+ test "updates the cache on all nodes given a tenant", %{node: node} do
+ external_id = "dev_tenant"
+ %Api.Tenant{name: expected_name} = tenant = Tenants.get_tenant_by_external_id(external_id)
+
+ dummy_name = random_string()
+
+ # Ensure cache has the values
+ Realtime.Tenants.Cache.update_cache(%{tenant | name: dummy_name})
+
+ Rpc.enhanced_call(node, Cache, :update_cache, [%{tenant | name: dummy_name}])
+
+ # Cache showing old value
+ assert %Api.Tenant{name: ^dummy_name} = Cache.get_tenant_by_external_id(external_id)
+ assert %Api.Tenant{name: ^dummy_name} = Rpc.enhanced_call(node, Cache, :get_tenant_by_external_id, [external_id])
+
+ # Update cache
+ assert :ok = Cache.global_cache_update(tenant)
+
+ # wait for cache to be updated in both nodes
+ Process.sleep(200)
+
+ # Cache showing new value
+ assert {:ok, %Api.Tenant{name: ^expected_name}} = Cachex.get(Cache, {:get_tenant_by_external_id, external_id})
+
+ assert {:ok, %Api.Tenant{name: ^expected_name}} =
+ Rpc.enhanced_call(node, Cachex, :get, [Cache, {:get_tenant_by_external_id, external_id}])
+ end
+ end
end
diff --git a/test/realtime/tenants/connect/register_process_test.exs b/test/realtime/tenants/connect/register_process_test.exs
index d4227996f..02cc33391 100644
--- a/test/realtime/tenants/connect/register_process_test.exs
+++ b/test/realtime/tenants/connect/register_process_test.exs
@@ -7,7 +7,7 @@ defmodule Realtime.Tenants.Connect.RegisterProcessTest do
setup do
tenant = Containers.checkout_tenant(run_migrations: true)
# Warm cache to avoid Cachex and Ecto.Sandbox ownership issues
- Cachex.put!(Realtime.Tenants.Cache, {{:get_tenant_by_external_id, 1}, [tenant.external_id]}, {:cached, tenant})
+ Realtime.Tenants.Cache.update_cache(tenant)
{:ok, conn} = Database.connect(tenant, "realtime_test")
%{tenant_id: tenant.external_id, db_conn_pid: conn}
end
diff --git a/test/realtime/tenants/connect_test.exs b/test/realtime/tenants/connect_test.exs
index 290fb1c8d..0b594de5d 100644
--- a/test/realtime/tenants/connect_test.exs
+++ b/test/realtime/tenants/connect_test.exs
@@ -50,7 +50,50 @@ defmodule Realtime.Tenants.ConnectTest do
end
end
+ describe "list_tenants/0" do
+ test "lists all tenants with active connections", %{tenant: tenant1} do
+ tenant2 = Containers.checkout_tenant(run_migrations: true)
+ assert {:ok, _} = Connect.lookup_or_start_connection(tenant1.external_id)
+ assert {:ok, _} = Connect.lookup_or_start_connection(tenant2.external_id)
+
+ list_tenants = Connect.list_tenants() |> MapSet.new()
+ tenants = MapSet.new([tenant1.external_id, tenant2.external_id])
+
+ assert MapSet.subset?(tenants, list_tenants)
+ end
+ end
+
describe "handle cold start" do
+ test "multiple processes connecting calling Connect.connect", %{tenant: tenant} do
+ parent = self()
+
+ # Let's slow down Connect.connect so that multiple RPC calls are executed
+ stub(Connect, :connect, fn x, y, z ->
+ :timer.sleep(1000)
+ call_original(Connect, :connect, [x, y, z])
+ end)
+
+ connect = fn -> send(parent, Connect.lookup_or_start_connection(tenant.external_id)) end
+ # Let's call enough times to potentially trigger the Connect RateCounter
+
+ for _ <- 1..50, do: spawn(connect)
+
+ assert_receive({:ok, pid}, 1100)
+
+ for _ <- 1..49, do: assert_receive({:ok, ^pid})
+
+ # Does not trigger rate limit as connections eventually succeeded
+
+ {:ok, rate_counter} =
+ tenant.external_id
+ |> Tenants.connect_errors_per_second_rate()
+ |> Realtime.RateCounter.get()
+
+ assert rate_counter.sum == 0
+ assert rate_counter.avg == 0.0
+ assert rate_counter.limit.triggered == false
+ end
+
test "multiple proccesses succeed together", %{tenant: tenant} do
parent = self()
@@ -78,12 +121,55 @@ defmodule Realtime.Tenants.ConnectTest do
assert_receive {:ok, ^pid}
end
- test "more than 5 seconds passed error out", %{tenant: tenant} do
+ test "more than 15 seconds passed error out", %{tenant: tenant} do
parent = self()
# Let's slow down Connect starting
expect(Database, :check_tenant_connection, fn t ->
- :timer.sleep(5500)
+ Process.sleep(15500)
+ call_original(Database, :check_tenant_connection, [t])
+ end)
+
+ connect = fn -> send(parent, Connect.lookup_or_start_connection(tenant.external_id)) end
+
+ spawn(connect)
+ spawn(connect)
+
+ {:error, :initializing} = Connect.lookup_or_start_connection(tenant.external_id)
+ # The above call waited 15 seconds
+ assert_receive {:error, :initializing}
+ assert_receive {:error, :initializing}
+
+ # This one will succeed
+ {:ok, _pid} = Connect.lookup_or_start_connection(tenant.external_id)
+ end
+
+ test "too many db connections", %{tenant: tenant} do
+ extension = %{
+ "type" => "postgres_cdc_rls",
+ "settings" => %{
+ "db_host" => "127.0.0.1",
+ "db_name" => "postgres",
+ "db_user" => "supabase_admin",
+ "db_password" => "postgres",
+ "poll_interval" => 100,
+ "poll_max_changes" => 100,
+ "poll_max_record_bytes" => 1_048_576,
+ "region" => "us-east-1",
+ "ssl_enforced" => false,
+ "db_pool" => 100,
+ "subcriber_pool_size" => 100,
+ "subs_pool_size" => 100
+ }
+ }
+
+ {:ok, tenant} = update_extension(tenant, extension)
+
+ parent = self()
+
+ # Let's slow down Connect starting
+ expect(Database, :check_tenant_connection, fn t ->
+ :timer.sleep(1000)
call_original(Database, :check_tenant_connection, [t])
end)
@@ -97,12 +183,13 @@ defmodule Realtime.Tenants.ConnectTest do
spawn(connect)
spawn(connect)
- {:error, :tenant_database_unavailable} = Connect.lookup_or_start_connection(tenant.external_id)
+ # This call fails immediately: the tenant database cannot satisfy the required pool size
+ {:error, :tenant_db_too_many_connections} = Connect.lookup_or_start_connection(tenant.external_id)
- # Only one will succeed the others timed out waiting
- assert_receive {:error, :tenant_database_unavailable}
- assert_receive {:error, :tenant_database_unavailable}
- assert_receive {:ok, _pid}, 7000
+ assert_receive {:error, :tenant_db_too_many_connections}
+ assert_receive {:error, :tenant_db_too_many_connections}
+ assert_receive {:error, :tenant_db_too_many_connections}
+ refute_receive _any
end
end
@@ -254,9 +341,9 @@ defmodule Realtime.Tenants.ConnectTest do
region = Tenants.region(tenant)
assert {_pid, %{conn: ^db_conn, region: ^region}} = :syn.lookup(Connect, external_id)
Process.sleep(1000)
- :syn.leave(:users, external_id, self())
+ external_id |> UsersCounter.scope() |> :syn.leave(external_id, self())
Process.sleep(1000)
- assert :undefined = :syn.lookup(Connect, external_id)
+ assert :undefined = external_id |> UsersCounter.scope() |> :syn.lookup(external_id)
refute Process.alive?(db_conn)
Connect.shutdown(external_id)
end
@@ -267,6 +354,34 @@ defmodule Realtime.Tenants.ConnectTest do
assert {:error, :tenant_suspended} = Connect.lookup_or_start_connection(tenant.external_id)
end
+ test "tenant not able to connect if database has not enough connections", %{
+ tenant: tenant
+ } do
+ extension = %{
+ "type" => "postgres_cdc_rls",
+ "settings" => %{
+ "db_host" => "127.0.0.1",
+ "db_name" => "postgres",
+ "db_user" => "supabase_admin",
+ "db_password" => "postgres",
+ "poll_interval" => 100,
+ "poll_max_changes" => 100,
+ "poll_max_record_bytes" => 1_048_576,
+ "region" => "us-east-1",
+ "ssl_enforced" => false,
+ "db_pool" => 100,
+ "subcriber_pool_size" => 100,
+ "subs_pool_size" => 100
+ }
+ }
+
+ {:ok, tenant} = update_extension(tenant, extension)
+
+ assert capture_log(fn ->
+ assert {:error, :tenant_db_too_many_connections} = Connect.lookup_or_start_connection(tenant.external_id)
+ end) =~ ~r/Only \d+ available connections\. At least \d+ connections are required/
+ end
+
test "handles tenant suspension and unsuspension in a reactive way", %{tenant: tenant} do
assert {:ok, db_conn} = Connect.lookup_or_start_connection(tenant.external_id)
assert Connect.ready?(tenant.external_id)
@@ -352,11 +467,13 @@ defmodule Realtime.Tenants.ConnectTest do
assert replication_connection_before == replication_connection_after
end
- test "on replication connection postgres pid being stopped, also kills the Connect module", %{tenant: tenant} do
+ test "on replication connection postgres pid being stopped, Connect module recovers it", %{tenant: tenant} do
assert {:ok, db_conn} = Connect.lookup_or_start_connection(tenant.external_id)
assert Connect.ready?(tenant.external_id)
replication_connection_pid = ReplicationConnection.whereis(tenant.external_id)
+ Process.monitor(replication_connection_pid)
+
assert Process.alive?(replication_connection_pid)
pid = Connect.whereis(tenant.external_id)
@@ -366,21 +483,33 @@ defmodule Realtime.Tenants.ConnectTest do
[]
)
- assert_process_down(replication_connection_pid)
- assert_process_down(pid)
+ assert_receive {:DOWN, _, :process, ^replication_connection_pid, _}
+
+ Process.sleep(1500)
+ new_replication_connection_pid = ReplicationConnection.whereis(tenant.external_id)
+
+ assert replication_connection_pid != new_replication_connection_pid
+ assert Process.alive?(new_replication_connection_pid)
+ assert Process.alive?(pid)
end
- test "on replication connection exit, also kills the Connect module", %{tenant: tenant} do
+ test "on replication connection exit, Connect module recovers it", %{tenant: tenant} do
assert {:ok, _db_conn} = Connect.lookup_or_start_connection(tenant.external_id)
assert Connect.ready?(tenant.external_id)
replication_connection_pid = ReplicationConnection.whereis(tenant.external_id)
+ Process.monitor(replication_connection_pid)
assert Process.alive?(replication_connection_pid)
pid = Connect.whereis(tenant.external_id)
Process.exit(replication_connection_pid, :kill)
+ assert_receive {:DOWN, _, :process, ^replication_connection_pid, _}
- assert_process_down(replication_connection_pid)
- assert_process_down(pid)
+ Process.sleep(1500)
+ new_replication_connection_pid = ReplicationConnection.whereis(tenant.external_id)
+
+ assert replication_connection_pid != new_replication_connection_pid
+ assert Process.alive?(new_replication_connection_pid)
+ assert Process.alive?(pid)
end
test "handles max_wal_senders by logging the correct operational code", %{tenant: tenant} do
@@ -429,6 +558,53 @@ defmodule Realtime.Tenants.ConnectTest do
assert capture_log(fn -> assert {:error, :rpc_error, _} = Connect.lookup_or_start_connection("tenant") end) =~
"project=tenant external_id=tenant [error] ErrorOnRpcCall"
end
+
+ test "rate limit connect when too many connections against bad database", %{tenant: tenant} do
+ extension = %{
+ "type" => "postgres_cdc_rls",
+ "settings" => %{
+ "db_host" => "127.0.0.1",
+ "db_name" => "postgres",
+ "db_user" => "supabase_admin",
+ "db_password" => "postgres",
+ "poll_interval" => 100,
+ "poll_max_changes" => 100,
+ "poll_max_record_bytes" => 1_048_576,
+ "region" => "us-east-1",
+ "ssl_enforced" => true
+ }
+ }
+
+ {:ok, tenant} = update_extension(tenant, extension)
+
+ log =
+ capture_log(fn ->
+ res =
+ for _ <- 1..50 do
+ Process.sleep(200)
+ Connect.lookup_or_start_connection(tenant.external_id)
+ end
+
+ assert Enum.any?(res, fn {_, res} -> res == :connect_rate_limit_reached end)
+ end)
+
+ assert log =~ "DatabaseConnectionRateLimitReached: Too many connection attempts against the tenant database"
+ end
+
+ test "rate limit connect will not trigger if connection is successful", %{tenant: tenant} do
+ log =
+ capture_log(fn ->
+ res =
+ for _ <- 1..20 do
+ Process.sleep(500)
+ Connect.lookup_or_start_connection(tenant.external_id)
+ end
+
+ refute Enum.any?(res, fn {_, res} -> res == :connect_rate_limit_reached end)
+ end)
+
+ refute log =~ "DatabaseConnectionRateLimitReached: Too many connection attempts against the tenant database"
+ end
end
describe "shutdown/1" do
@@ -449,30 +625,6 @@ defmodule Realtime.Tenants.ConnectTest do
test "if tenant does not exist, does nothing" do
assert :ok = Connect.shutdown("none")
end
-
- test "tenant not able to connect if database has not enough connections", %{tenant: tenant} do
- extension = %{
- "type" => "postgres_cdc_rls",
- "settings" => %{
- "db_host" => "127.0.0.1",
- "db_name" => "postgres",
- "db_user" => "supabase_admin",
- "db_password" => "postgres",
- "poll_interval" => 100,
- "poll_max_changes" => 100,
- "poll_max_record_bytes" => 1_048_576,
- "region" => "us-east-1",
- "ssl_enforced" => false,
- "db_pool" => 100,
- "subcriber_pool_size" => 100,
- "subs_pool_size" => 100
- }
- }
-
- {:ok, tenant} = update_extension(tenant, extension)
-
- assert {:error, :tenant_db_too_many_connections} = Connect.lookup_or_start_connection(tenant.external_id)
- end
end
describe "registers into local registry" do
@@ -519,6 +671,6 @@ defmodule Realtime.Tenants.ConnectTest do
put_in(extension, ["settings", "db_port"], db_port)
]
- Realtime.Api.update_tenant(tenant, %{extensions: extensions})
+ Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{extensions: extensions})
end
end
diff --git a/test/realtime/tenants/janitor/maintenance_task_test.exs b/test/realtime/tenants/janitor/maintenance_task_test.exs
index f4c51436e..5d4aea474 100644
--- a/test/realtime/tenants/janitor/maintenance_task_test.exs
+++ b/test/realtime/tenants/janitor/maintenance_task_test.exs
@@ -4,20 +4,26 @@ defmodule Realtime.Tenants.Janitor.MaintenanceTaskTest do
alias Realtime.Tenants.Janitor.MaintenanceTask
alias Realtime.Api.Message
alias Realtime.Database
- alias Realtime.Repo
+ alias Realtime.Tenants.Repo
setup do
tenant = Containers.checkout_tenant(run_migrations: true)
# Warm cache to avoid Cachex and Ecto.Sandbox ownership issues
- Cachex.put!(Realtime.Tenants.Cache, {{:get_tenant_by_external_id, 1}, [tenant.external_id]}, {:cached, tenant})
+ Realtime.Tenants.Cache.update_cache(tenant)
%{tenant: tenant}
end
test "cleans messages older than 72 hours and creates partitions", %{tenant: tenant} do
+ {:ok, conn} = Database.connect(tenant, "realtime_test", :stop)
+
utc_now = NaiveDateTime.utc_now()
limit = NaiveDateTime.add(utc_now, -72, :hour)
+ date_start = Date.utc_today() |> Date.add(-10)
+ date_end = Date.utc_today()
+ create_messages_partitions(conn, date_start, date_end)
+
messages =
for days <- -5..0 do
inserted_at = NaiveDateTime.add(utc_now, days, :day)
@@ -27,12 +33,11 @@ defmodule Realtime.Tenants.Janitor.MaintenanceTaskTest do
to_keep =
messages
- |> Enum.reject(&(NaiveDateTime.compare(limit, &1.inserted_at) == :gt))
+ |> Enum.reject(&(NaiveDateTime.compare(NaiveDateTime.beginning_of_day(limit), &1.inserted_at) == :gt))
|> MapSet.new()
assert MaintenanceTask.run(tenant.external_id) == :ok
- {:ok, conn} = Database.connect(tenant, "realtime_test", :stop)
{:ok, res} = Repo.all(conn, from(m in Message), Message)
verify_partitions(conn)
@@ -63,7 +68,7 @@ defmodule Realtime.Tenants.Janitor.MaintenanceTaskTest do
tenant = tenant_fixture(%{extensions: extensions})
# Warm cache to avoid Cachex and Ecto.Sandbox ownership issues
- Cachex.put!(Realtime.Tenants.Cache, {{:get_tenant_by_external_id, 1}, [tenant.external_id]}, {:cached, tenant})
+ Realtime.Tenants.Cache.update_cache(tenant)
Process.flag(:trap_exit, true)
@@ -80,7 +85,7 @@ defmodule Realtime.Tenants.Janitor.MaintenanceTaskTest do
defp verify_partitions(conn) do
today = Date.utc_today()
- yesterday = Date.add(today, -1)
+ yesterday = Date.add(today, -3)
future = Date.add(today, 3)
dates = Date.range(yesterday, future)
diff --git a/test/realtime/tenants/janitor_test.exs b/test/realtime/tenants/janitor_test.exs
index 4ac1a0eda..aa32b86f8 100644
--- a/test/realtime/tenants/janitor_test.exs
+++ b/test/realtime/tenants/janitor_test.exs
@@ -6,9 +6,9 @@ defmodule Realtime.Tenants.JanitorTest do
alias Realtime.Api.Message
alias Realtime.Database
- alias Realtime.Repo
alias Realtime.Tenants.Janitor
alias Realtime.Tenants.Connect
+ alias Realtime.Tenants.Repo
setup do
:ets.delete_all_objects(Connect)
@@ -24,13 +24,21 @@ defmodule Realtime.Tenants.JanitorTest do
Enum.map(
[tenant1, tenant2],
fn tenant ->
- tenant = Repo.preload(tenant, :extensions)
+ tenant = Realtime.Repo.preload(tenant, :extensions)
Connect.lookup_or_start_connection(tenant.external_id)
Process.sleep(500)
tenant
end
)
+ date_start = Date.utc_today() |> Date.add(-10)
+ date_end = Date.utc_today()
+
+ Enum.map(tenants, fn tenant ->
+ {:ok, conn} = Database.connect(tenant, "realtime_test", :stop)
+ create_messages_partitions(conn, date_start, date_end)
+ end)
+
start_supervised!(
{Task.Supervisor,
name: Realtime.Tenants.Janitor.TaskSupervisor, max_children: 5, max_seconds: 500, max_restarts: 1}
@@ -62,7 +70,7 @@ defmodule Realtime.Tenants.JanitorTest do
to_keep =
messages
- |> Enum.reject(&(NaiveDateTime.compare(limit, &1.inserted_at) == :gt))
+ |> Enum.reject(&(NaiveDateTime.compare(NaiveDateTime.beginning_of_day(limit), &1.inserted_at) == :gt))
|> MapSet.new()
start_supervised!(Janitor)
@@ -105,7 +113,7 @@ defmodule Realtime.Tenants.JanitorTest do
to_keep =
messages
- |> Enum.reject(&(NaiveDateTime.compare(limit, &1.inserted_at) == :gt))
+ |> Enum.reject(&(NaiveDateTime.compare(NaiveDateTime.beginning_of_day(limit), &1.inserted_at) == :gt))
|> MapSet.new()
start_supervised!(Janitor)
@@ -162,7 +170,7 @@ defmodule Realtime.Tenants.JanitorTest do
defp verify_partitions(conn) do
today = Date.utc_today()
- yesterday = Date.add(today, -1)
+ yesterday = Date.add(today, -3)
future = Date.add(today, 3)
dates = Date.range(yesterday, future)
diff --git a/test/realtime/tenants/rebalancer_test.exs b/test/realtime/tenants/rebalancer_test.exs
index ac8e1ea36..d91e7e675 100644
--- a/test/realtime/tenants/rebalancer_test.exs
+++ b/test/realtime/tenants/rebalancer_test.exs
@@ -9,7 +9,7 @@ defmodule Realtime.Tenants.RebalancerTest do
setup do
tenant = Containers.checkout_tenant(run_migrations: true)
# Warm cache to avoid Cachex and Ecto.Sandbox ownership issues
- Cachex.put!(Realtime.Tenants.Cache, {{:get_tenant_by_external_id, 1}, [tenant.external_id]}, {:cached, tenant})
+ Realtime.Tenants.Cache.update_cache(tenant)
%{tenant: tenant}
end
diff --git a/test/realtime/tenants/replication_connection_test.exs b/test/realtime/tenants/replication_connection_test.exs
index 783270313..031f3cae6 100644
--- a/test/realtime/tenants/replication_connection_test.exs
+++ b/test/realtime/tenants/replication_connection_test.exs
@@ -11,6 +11,9 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
alias Realtime.Tenants
alias Realtime.Tenants.ReplicationConnection
alias RealtimeWeb.Endpoint
+ alias Realtime.Tenants.Repo
+
+ @replication_slot_name "supabase_realtime_messages_replication_slot_test"
setup do
slot = Application.get_env(:realtime, :slot_name_suffix)
@@ -20,11 +23,9 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
tenant = Containers.checkout_tenant(run_migrations: true)
{:ok, db_conn} = Database.connect(tenant, "realtime_test", :stop)
- name = "supabase_realtime_messages_replication_slot_test"
- Postgrex.query(db_conn, "SELECT pg_drop_replication_slot($1)", [name])
- Process.exit(db_conn, :normal)
+ Postgrex.query(db_conn, "SELECT pg_drop_replication_slot($1)", [@replication_slot_name])
- %{tenant: tenant}
+ %{tenant: tenant, db_conn: db_conn}
end
describe "temporary process" do
@@ -70,7 +71,7 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
assert {:error, _} = ReplicationConnection.start(tenant, self())
end
- test "starts a handler for the tenant and broadcasts", %{tenant: tenant} do
+ test "starts a handler for the tenant and broadcasts", %{tenant: tenant, db_conn: db_conn} do
start_link_supervised!(
{ReplicationConnection, %ReplicationConnection{tenant_id: tenant.external_id, monitored_pid: self()}},
restart: :transient
@@ -98,8 +99,8 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
payload = %{
"event" => "INSERT",
+ "meta" => %{"id" => row.id},
"payload" => %{
- "id" => row.id,
"value" => value
},
"type" => "broadcast"
@@ -121,8 +122,89 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
})
end
- {:ok, db_conn} = Database.connect(tenant, "realtime_test", :stop)
- {:ok, _} = Realtime.Repo.insert_all_entries(db_conn, messages, Message)
+ {:ok, _} = Repo.insert_all_entries(db_conn, messages, Message)
+
+ messages_received =
+ for _ <- 1..total_messages, into: [] do
+ assert_receive {:socket_push, :text, data}
+ data |> IO.iodata_to_binary() |> Jason.decode!()
+ end
+
+ for row <- messages do
+ assert Enum.count(messages_received, fn message_received ->
+ value = row |> Map.from_struct() |> get_in([:changes, :payload, "value"])
+
+ match?(
+ %{
+ "event" => "broadcast",
+ "payload" => %{
+ "event" => "INSERT",
+ "meta" => %{"id" => _id},
+ "payload" => %{
+ "value" => ^value
+ }
+ },
+ "ref" => nil,
+ "topic" => ^topic
+ },
+ message_received
+ )
+ end) == 1
+ end
+ end
+
+ test "starts a handler for the tenant and broadcasts to public channel", %{tenant: tenant, db_conn: db_conn} do
+ start_link_supervised!(
+ {ReplicationConnection, %ReplicationConnection{tenant_id: tenant.external_id, monitored_pid: self()}},
+ restart: :transient
+ )
+
+ topic = random_string()
+ tenant_topic = Tenants.tenant_topic(tenant.external_id, topic, true)
+ subscribe(tenant_topic, topic)
+
+ total_messages = 5
+ # Works with one insert per transaction
+ for _ <- 1..total_messages do
+ value = random_string()
+
+ row =
+ message_fixture(tenant, %{
+ "topic" => topic,
+ "private" => false,
+ "event" => "INSERT",
+ "payload" => %{"value" => value}
+ })
+
+ assert_receive {:socket_push, :text, data}
+ message = data |> IO.iodata_to_binary() |> Jason.decode!()
+
+ payload = %{
+ "event" => "INSERT",
+ "meta" => %{"id" => row.id},
+ "payload" => %{
+ "value" => value
+ },
+ "type" => "broadcast"
+ }
+
+ assert message == %{"event" => "broadcast", "payload" => payload, "ref" => nil, "topic" => topic}
+ end
+
+ Process.sleep(500)
+ # Works with batch inserts
+ messages =
+ for _ <- 1..total_messages do
+ Message.changeset(%Message{}, %{
+ "topic" => topic,
+ "private" => false,
+ "event" => "INSERT",
+ "extension" => "broadcast",
+ "payload" => %{"value" => random_string()}
+ })
+ end
+
+ {:ok, _} = Repo.insert_all_entries(db_conn, messages, Message)
messages_received =
for _ <- 1..total_messages, into: [] do
@@ -139,8 +221,8 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
"event" => "broadcast",
"payload" => %{
"event" => "INSERT",
+ "meta" => %{"id" => _id},
"payload" => %{
- "id" => _,
"value" => ^value
}
},
@@ -153,6 +235,113 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
end
end
+ test "replicates binary with exactly 16 bytes to test UUID conversion error", %{tenant: tenant} do
+ start_link_supervised!(
+ {ReplicationConnection, %ReplicationConnection{tenant_id: tenant.external_id, monitored_pid: self()}},
+ restart: :transient
+ )
+
+ topic = "db:job_scheduler"
+ tenant_topic = Tenants.tenant_topic(tenant.external_id, topic, false)
+ subscribe(tenant_topic, topic)
+ payload = %{"value" => random_string()}
+
+ row =
+ message_fixture(tenant, %{
+ "topic" => topic,
+ "private" => true,
+ "event" => "UPDATE",
+ "extension" => "broadcast",
+ "payload" => payload
+ })
+
+ row_id = row.id
+
+ assert_receive {:socket_push, :text, data}, 2000
+ message = data |> IO.iodata_to_binary() |> Jason.decode!()
+
+ assert %{
+ "event" => "broadcast",
+ "payload" => %{
+ "event" => "UPDATE",
+ "meta" => %{"id" => ^row_id},
+ "payload" => received_payload,
+ "type" => "broadcast"
+ },
+ "ref" => nil,
+ "topic" => ^topic
+ } = message
+
+ assert received_payload == payload
+ end
+
+ test "should not process unsupported relations", %{tenant: tenant, db_conn: db_conn} do
+ # update
+ queries = [
+ "DROP TABLE IF EXISTS public.test",
+ """
+ CREATE TABLE "public"."test" (
+ "id" int4 NOT NULL default nextval('test_id_seq'::regclass),
+ "details" text,
+ PRIMARY KEY ("id"));
+ """
+ ]
+
+ Postgrex.transaction(db_conn, fn conn ->
+ Enum.each(queries, &Postgrex.query!(conn, &1, []))
+ end)
+
+ logs =
+ capture_log(fn ->
+ start_link_supervised!(
+ {ReplicationConnection, %ReplicationConnection{tenant_id: tenant.external_id, monitored_pid: self()}},
+ restart: :transient
+ )
+
+ assert_replication_started(db_conn, @replication_slot_name)
+ assert_publication_contains_only_messages(db_conn, "supabase_realtime_messages_publication")
+
+ # Add table to publication to test the error handling
+ Postgrex.query!(db_conn, "ALTER PUBLICATION supabase_realtime_messages_publication ADD TABLE public.test", [])
+ %{rows: [[_id]]} = Postgrex.query!(db_conn, "insert into test (details) values ('test') returning id", [])
+
+ topic = "db:job_scheduler"
+ tenant_topic = Tenants.tenant_topic(tenant.external_id, topic, false)
+ subscribe(tenant_topic, topic)
+ payload = %{"value" => random_string()}
+
+ row =
+ message_fixture(tenant, %{
+ "topic" => topic,
+ "private" => true,
+ "event" => "UPDATE",
+ "extension" => "broadcast",
+ "payload" => payload
+ })
+
+ row_id = row.id
+
+ assert_receive {:socket_push, :text, data}, 2000
+ message = data |> IO.iodata_to_binary() |> Jason.decode!()
+
+ assert %{
+ "event" => "broadcast",
+ "payload" => %{
+ "event" => "UPDATE",
+ "meta" => %{"id" => ^row_id},
+ "payload" => received_payload,
+ "type" => "broadcast"
+ },
+ "ref" => nil,
+ "topic" => ^topic
+ } = message
+
+ assert received_payload == payload
+ end)
+
+ assert logs =~ "Unexpected relation on schema 'public' and table 'test'"
+ end
+
test "monitored pid stopping brings down ReplicationConnection ", %{tenant: tenant} do
monitored_pid =
spawn(fn ->
@@ -204,7 +393,32 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
assert logs =~ "UnableToBroadcastChanges"
end
- test "payload without id", %{tenant: tenant} do
+ test "message that exceeds payload size logs error", %{tenant: tenant} do
+ logs =
+ capture_log(fn ->
+ start_supervised!(
+ {ReplicationConnection, %ReplicationConnection{tenant_id: tenant.external_id, monitored_pid: self()}},
+ restart: :transient
+ )
+
+ topic = random_string()
+ tenant_topic = Tenants.tenant_topic(tenant.external_id, topic, false)
+ assert :ok = Endpoint.subscribe(tenant_topic)
+
+ message_fixture(tenant, %{
+ "event" => random_string(),
+ "topic" => random_string(),
+ "private" => true,
+ "payload" => %{"data" => random_string(tenant.max_payload_size_in_kb * 1000 + 1)}
+ })
+
+ refute_receive %Phoenix.Socket.Broadcast{}, 500
+ end)
+
+ assert logs =~ "UnableToBroadcastChanges: %{messages: [%{payload: [\"Payload size exceeds tenant limit\"]}]}"
+ end
+
+ test "payload without id", %{tenant: tenant, db_conn: db_conn} do
start_link_supervised!(
{ReplicationConnection, %ReplicationConnection{tenant_id: tenant.external_id, monitored_pid: self()}},
restart: :transient
@@ -214,33 +428,39 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
tenant_topic = Tenants.tenant_topic(tenant.external_id, topic, false)
subscribe(tenant_topic, topic)
- fixture =
- message_fixture(tenant, %{
- "topic" => topic,
- "private" => true,
- "event" => "INSERT",
- "payload" => %{"value" => "something"}
- })
+ value = "something"
+ event = "INSERT"
+
+ Postgrex.query!(
+ db_conn,
+ "SELECT realtime.send (json_build_object ('value', $1 :: text)::jsonb, $2 :: text, $3 :: text, TRUE::bool);",
+ [value, event, topic]
+ )
+
+ {:ok, [%{id: id}]} = Repo.all(db_conn, from(m in Message), Message)
assert_receive {:socket_push, :text, data}, 500
message = data |> IO.iodata_to_binary() |> Jason.decode!()
assert %{
"event" => "broadcast",
- "payload" => %{"event" => "INSERT", "payload" => payload, "type" => "broadcast"},
+ "payload" => %{
+ "event" => "INSERT",
+ "meta" => %{"id" => ^id},
+ "payload" => payload,
+ "type" => "broadcast"
+ },
"ref" => nil,
"topic" => ^topic
} = message
- id = fixture.id
-
assert payload == %{
"value" => "something",
"id" => id
}
end
- test "payload including id", %{tenant: tenant} do
+ test "payload including id", %{tenant: tenant, db_conn: db_conn} do
start_link_supervised!(
{ReplicationConnection, %ReplicationConnection{tenant_id: tenant.external_id, monitored_pid: self()}},
restart: :transient
@@ -250,21 +470,29 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
tenant_topic = Tenants.tenant_topic(tenant.external_id, topic, false)
subscribe(tenant_topic, topic)
- payload = %{"value" => "something", "id" => "123456"}
+ id = "123456"
+ value = "something"
+ event = "INSERT"
- message_fixture(tenant, %{
- "topic" => topic,
- "private" => true,
- "event" => "INSERT",
- "payload" => payload
- })
+ Postgrex.query!(
+ db_conn,
+ "SELECT realtime.send (json_build_object ('value', $1 :: text, 'id', $2 :: text)::jsonb, $3 :: text, $4 :: text, TRUE::bool);",
+ [value, id, event, topic]
+ )
+
+ {:ok, [%{id: message_id}]} = Repo.all(db_conn, from(m in Message), Message)
assert_receive {:socket_push, :text, data}, 500
message = data |> IO.iodata_to_binary() |> Jason.decode!()
assert %{
"event" => "broadcast",
- "payload" => %{"event" => "INSERT", "payload" => ^payload, "type" => "broadcast"},
+ "payload" => %{
+ "meta" => %{"id" => ^message_id},
+ "event" => "INSERT",
+ "payload" => %{"value" => "something", "id" => ^id},
+ "type" => "broadcast"
+ },
"ref" => nil,
"topic" => ^topic
} = message
@@ -272,7 +500,7 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
test "fails on existing replication slot", %{tenant: tenant} do
{:ok, db_conn} = Database.connect(tenant, "realtime_test", :stop)
- name = "supabase_realtime_messages_replication_slot_test"
+ name = @replication_slot_name
Postgrex.query!(db_conn, "SELECT pg_create_logical_replication_slot($1, 'test_decoding')", [name])
@@ -331,6 +559,118 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
assert {:error, :max_wal_senders_reached} = ReplicationConnection.start(tenant, self())
end
+
+ test "handles WAL pressure gracefully", %{tenant: tenant} do
+ {:ok, replication_pid} = ReplicationConnection.start(tenant, self())
+
+ {:ok, conn} = Database.connect(tenant, "realtime_test", :stop)
+ on_exit(fn -> Process.exit(conn, :normal) end)
+
+ large_payload = String.duplicate("x", 10 * 1024 * 1024)
+
+ for i <- 1..5 do
+ message_fixture_with_conn(tenant, conn, %{
+ "topic" => "stress_#{i}",
+ "private" => true,
+ "event" => "INSERT",
+ "payload" => %{"data" => large_payload}
+ })
+ end
+
+ assert Process.alive?(replication_pid)
+ end
+ end
+
+ describe "publication validation steps" do
+ test "if proper tables are included, starts replication", %{tenant: tenant, db_conn: db_conn} do
+ publication_name = "supabase_realtime_messages_publication"
+
+ Postgrex.query!(db_conn, "DROP PUBLICATION IF EXISTS #{publication_name}", [])
+ Postgrex.query!(db_conn, "CREATE PUBLICATION #{publication_name} FOR TABLE realtime.messages", [])
+
+ logs =
+ capture_log(fn ->
+ {:ok, pid} = ReplicationConnection.start(tenant, self())
+
+ assert_replication_started(db_conn, @replication_slot_name)
+ assert Process.alive?(pid)
+ assert_publication_contains_only_messages(db_conn, publication_name)
+
+ Process.exit(pid, :shutdown)
+ end)
+
+ refute logs =~ "Recreating"
+ end
+
+ test "if includes unexpected tables, recreates publication", %{tenant: tenant, db_conn: db_conn} do
+ publication_name = "supabase_realtime_messages_publication"
+
+ Postgrex.query!(db_conn, "DROP PUBLICATION IF EXISTS #{publication_name}", [])
+ Postgrex.query!(db_conn, "CREATE TABLE IF NOT EXISTS public.wrong_table (id int)", [])
+ Postgrex.query!(db_conn, "CREATE PUBLICATION #{publication_name} FOR TABLE public.wrong_table", [])
+
+ logs =
+ capture_log(fn ->
+ {:ok, pid} = ReplicationConnection.start(tenant, self())
+
+ assert_replication_started(db_conn, @replication_slot_name)
+ assert Process.alive?(pid)
+ assert_publication_contains_only_messages(db_conn, publication_name)
+
+ Process.exit(pid, :shutdown)
+ end)
+
+ assert logs =~ "Recreating"
+ end
+
+ test "recreates publication if it has no tables", %{tenant: tenant, db_conn: db_conn} do
+ publication_name = "supabase_realtime_messages_publication"
+
+ Postgrex.query!(db_conn, "DROP PUBLICATION IF EXISTS #{publication_name}", [])
+ Postgrex.query!(db_conn, "CREATE PUBLICATION #{publication_name}", [])
+
+ logs =
+ capture_log(fn ->
+ {:ok, pid} = ReplicationConnection.start(tenant, self())
+
+ assert_replication_started(db_conn, @replication_slot_name)
+ assert Process.alive?(pid)
+ assert_publication_contains_only_messages(db_conn, publication_name)
+
+ Process.exit(pid, :shutdown)
+ end)
+
+ assert logs =~ "Recreating"
+ end
+
+ test "recreates publication if it has expected tables and unexpected tables under same publication", %{
+ tenant: tenant,
+ db_conn: db_conn
+ } do
+ publication_name = "supabase_realtime_messages_publication"
+
+ Postgrex.query!(db_conn, "DROP PUBLICATION IF EXISTS #{publication_name}", [])
+ Postgrex.query!(db_conn, "CREATE TABLE IF NOT EXISTS public.extra_table (id int)", [])
+
+ Postgrex.query!(
+ db_conn,
+ "CREATE PUBLICATION #{publication_name} FOR TABLE realtime.messages, public.extra_table",
+ []
+ )
+
+ logs =
+ capture_log(fn ->
+ {:ok, pid} = ReplicationConnection.start(tenant, self())
+
+ assert_replication_started(db_conn, @replication_slot_name)
+ assert Process.alive?(pid)
+ assert_publication_contains_only_messages(db_conn, publication_name)
+
+ Process.exit(pid, :shutdown)
+ end)
+
+ assert logs =~ "Recreating"
+ end
end
describe "whereis/1" do
@@ -378,7 +718,7 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
"payload" => %{"value" => random_string()}
})
- assert_receive {:socket_push, :text, data}
+ assert_receive {:socket_push, :text, data}, 500
message = data |> IO.iodata_to_binary() |> Jason.decode!()
assert %{"event" => "broadcast", "payload" => _, "ref" => nil, "topic" => ^topic} = message
@@ -409,4 +749,59 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
ref = Process.monitor(pid)
assert_receive {:DOWN, ^ref, :process, ^pid, _reason}, timeout
end
+
+ defp message_fixture_with_conn(_tenant, conn, override) do
+ create_attrs = %{
+ "topic" => random_string(),
+ "extension" => "broadcast"
+ }
+
+ override = override |> Enum.map(fn {k, v} -> {"#{k}", v} end) |> Map.new()
+
+ {:ok, message} =
+ create_attrs
+ |> Map.merge(override)
+ |> TenantConnection.create_message(conn)
+
+ message
+ end
+
+ defp assert_publication_contains_only_messages(db_conn, publication_name) do
+ %{rows: rows} =
+ Postgrex.query!(
+ db_conn,
+ "SELECT schemaname, tablename FROM pg_publication_tables WHERE pubname = $1",
+ [publication_name]
+ )
+
+ valid_tables =
+ Enum.all?(rows, fn [schema, table] ->
+ schema == "realtime" and (table == "messages" or String.starts_with?(table, "messages_"))
+ end)
+
+ assert valid_tables, "Expected only realtime.messages or its partitions, got: #{inspect(rows)}"
+ end
+
+ defp assert_replication_started(db_conn, slot_name, retries \\ 10, interval_ms \\ 10) do
+ case check_replication_status(db_conn, slot_name, retries, interval_ms) do
+ :ok -> :ok
+ :error -> flunk("Replication slot #{slot_name} did not become active")
+ end
+ end
+
+ defp check_replication_status(_db_conn, _slot_name, 0, _interval_ms), do: :error
+
+ defp check_replication_status(db_conn, slot_name, retries_remaining, interval_ms) do
+ %{rows: rows} =
+ Postgrex.query!(db_conn, "SELECT active FROM pg_replication_slots WHERE slot_name = $1", [slot_name])
+
+ case rows do
+ [[true]] ->
+ :ok
+
+ _ ->
+ Process.sleep(interval_ms)
+ check_replication_status(db_conn, slot_name, retries_remaining - 1, interval_ms)
+ end
+ end
end
diff --git a/test/realtime/repo_test.exs b/test/realtime/tenants/repo_test.exs
similarity index 99%
rename from test/realtime/repo_test.exs
rename to test/realtime/tenants/repo_test.exs
index 7d6841b01..697274494 100644
--- a/test/realtime/repo_test.exs
+++ b/test/realtime/tenants/repo_test.exs
@@ -1,10 +1,10 @@
-defmodule Realtime.RepoTest do
+defmodule Realtime.Tenants.RepoTest do
use Realtime.DataCase, async: true
import Ecto.Query
alias Realtime.Api.Message
- alias Realtime.Repo
+ alias Realtime.Tenants.Repo
alias Realtime.Database
setup do
diff --git a/test/realtime/tenants_test.exs b/test/realtime/tenants_test.exs
index aefe0b86c..202facdb5 100644
--- a/test/realtime/tenants_test.exs
+++ b/test/realtime/tenants_test.exs
@@ -89,15 +89,6 @@ defmodule Realtime.TenantsTest do
end
end
- describe "update_migrations_ran/1" do
- test "updates migrations_ran to the count of all migrations" do
- tenant = tenant_fixture(%{migrations_ran: 0})
- Tenants.update_migrations_ran(tenant.external_id, 1)
- tenant = Repo.reload!(tenant)
- assert tenant.migrations_ran == 1
- end
- end
-
describe "broadcast_operation_event/2" do
setup do
tenant = tenant_fixture()
diff --git a/test/realtime/user_counter_test.exs b/test/realtime/user_counter_test.exs
index d93529764..f7725885d 100644
--- a/test/realtime/user_counter_test.exs
+++ b/test/realtime/user_counter_test.exs
@@ -3,6 +3,13 @@ defmodule Realtime.UsersCounterTest do
alias Realtime.UsersCounter
alias Realtime.Rpc
+ setup_all do
+ tenant_id = random_string()
+ count = generate_load(tenant_id)
+
+ %{tenant_id: tenant_id, count: count, nodes: Node.list()}
+ end
+
describe "add/1" do
test "starts counter for tenant" do
assert UsersCounter.add(self(), random_string()) == :ok
@@ -11,45 +18,111 @@ defmodule Realtime.UsersCounterTest do
@aux_mod (quote do
defmodule Aux do
- def ping(),
- do:
- spawn(fn ->
- Process.sleep(3000)
- :pong
- end)
+ def ping() do
+ spawn(fn -> Process.sleep(:infinity) end)
+ end
+
+ def join(pid, group) do
+ UsersCounter.add(pid, group)
+ end
end
end)
Code.eval_quoted(@aux_mod)
+ describe "tenant_counts/0" do
+ test "map of tenant and number of users", %{tenant_id: tenant_id, count: expected} do
+ assert UsersCounter.add(self(), tenant_id) == :ok
+ Process.sleep(1000)
+ counts = UsersCounter.tenant_counts()
+
+ assert counts[tenant_id] == expected + 1
+ assert map_size(counts) >= 61
+
+ counts = Beacon.local_member_counts(:users)
+
+ assert counts[tenant_id] == 1
+ assert map_size(counts) >= 1
+
+ counts = Beacon.member_counts(:users)
+
+ assert counts[tenant_id] == expected + 1
+ assert map_size(counts) >= 61
+ end
+ end
+
+ describe "tenant_counts/1" do
+ test "map of tenant and number of users for a node only", %{tenant_id: tenant_id, nodes: nodes} do
+ assert UsersCounter.add(self(), tenant_id) == :ok
+ Process.sleep(1000)
+ my_counts = UsersCounter.tenant_counts(Node.self())
+ # Only one connection from this test process on this node
+ assert my_counts == %{tenant_id => 1}
+
+ another_node_counts = UsersCounter.tenant_counts(hd(nodes))
+ assert another_node_counts[tenant_id] == 2
+
+ assert map_size(another_node_counts) == 21
+
+ assert Beacon.local_member_counts(:users) == %{tenant_id => 1}
+ end
+ end
+
describe "tenant_users/1" do
- test "returns count of connected clients for tenant on cluster node" do
- tenant_id = random_string()
- expected = generate_load(tenant_id)
+ test "returns count of connected clients for tenant on cluster node", %{tenant_id: tenant_id, count: expected} do
Process.sleep(1000)
assert UsersCounter.tenant_users(tenant_id) == expected
end
end
describe "tenant_users/2" do
- test "returns count of connected clients for tenant on target cluster" do
- tenant_id = random_string()
- generate_load(tenant_id)
- {:ok, node} = Clustered.start(@aux_mod)
- pid = Rpc.call(node, Aux, :ping, [])
- UsersCounter.add(pid, tenant_id)
- assert UsersCounter.tenant_users(node, tenant_id) == 1
+ test "returns count of connected clients for tenant on target cluster", %{tenant_id: tenant_id, nodes: nodes} do
+ node = hd(nodes)
+ assert UsersCounter.tenant_users(node, tenant_id) == 2
+
+ assert Beacon.member_count(:users, tenant_id, node) == 2
end
end
- defp generate_load(tenant_id, nodes \\ 2, processes \\ 2) do
- for i <- 1..nodes do
+ defp generate_load(tenant_id) do
+ processes = 2
+
+ nodes = %{
+ :"main@127.0.0.1" => 5969,
+ :"us_node@127.0.0.1" => 16980,
+ :"ap2_nodeX@127.0.0.1" => 16981,
+ :"ap2_nodeY@127.0.0.1" => 16982
+ }
+
+ regions = %{
+ :"us_node@127.0.0.1" => "us-east-1",
+ :"ap2_nodeX@127.0.0.1" => "ap-southeast-2",
+ :"ap2_nodeY@127.0.0.1" => "ap-southeast-2"
+ }
+
+ on_exit(fn -> Application.put_env(:gen_rpc, :client_config_per_node, {:internal, %{}}) end)
+ Application.put_env(:gen_rpc, :client_config_per_node, {:internal, nodes})
+
+ nodes
+ |> Enum.filter(fn {node, _port} -> node != Node.self() end)
+ |> Enum.with_index(1)
+ |> Enum.each(fn {{node, gen_rpc_port}, i} ->
# Avoid port collision
extra_config = [
- {:gen_rpc, :tcp_server_port, 15970 + i}
+ {:gen_rpc, :tcp_server_port, gen_rpc_port},
+ {:gen_rpc, :client_config_per_node, {:internal, nodes}},
+ {:realtime, :users_scope_broadcast_interval_in_ms, 100},
+ {:realtime, :region, regions[node]}
]
- {:ok, node} = Clustered.start(@aux_mod, extra_config: extra_config, phoenix_port: 4012 + i)
+ node_name =
+ node
+ |> to_string()
+ |> String.split("@")
+ |> hd()
+ |> String.to_atom()
+
+ {:ok, node} = Clustered.start(@aux_mod, name: node_name, extra_config: extra_config, phoenix_port: 4012 + i)
for _ <- 1..processes do
pid = Rpc.call(node, Aux, :ping, [])
@@ -57,18 +130,17 @@ defmodule Realtime.UsersCounterTest do
for _ <- 1..10 do
# replicate same pid added multiple times concurrently
Task.start(fn ->
- UsersCounter.add(pid, tenant_id)
+ Rpc.call(node, Aux, :join, [pid, tenant_id])
end)
# noisy neighbors to test handling of bigger loads on concurrent calls
Task.start(fn ->
- pid = Rpc.call(node, Aux, :ping, [])
- UsersCounter.add(pid, random_string())
+ Rpc.call(node, Aux, :join, [pid, random_string()])
end)
end
end
- end
+ end)
- nodes * processes
+ 3 * processes
end
end
diff --git a/test/realtime_web/channels/payloads/join_test.exs b/test/realtime_web/channels/payloads/join_test.exs
index 32bf1b397..f02c2a73d 100644
--- a/test/realtime_web/channels/payloads/join_test.exs
+++ b/test/realtime_web/channels/payloads/join_test.exs
@@ -6,6 +6,7 @@ defmodule RealtimeWeb.Channels.Payloads.JoinTest do
alias RealtimeWeb.Channels.Payloads.Join
alias RealtimeWeb.Channels.Payloads.Config
alias RealtimeWeb.Channels.Payloads.Broadcast
+ alias RealtimeWeb.Channels.Payloads.Broadcast.Replay
alias RealtimeWeb.Channels.Payloads.Presence
alias RealtimeWeb.Channels.Payloads.PostgresChange
@@ -17,7 +18,7 @@ defmodule RealtimeWeb.Channels.Payloads.JoinTest do
config = %{
"config" => %{
"private" => false,
- "broadcast" => %{"ack" => false, "self" => false},
+ "broadcast" => %{"ack" => false, "self" => false, "replay" => %{"since" => 1, "limit" => 10}},
"presence" => %{"enabled" => true, "key" => key},
"postgres_changes" => [
%{"event" => "INSERT", "schema" => "public", "table" => "users", "filter" => "id=eq.1"},
@@ -37,8 +38,9 @@ defmodule RealtimeWeb.Channels.Payloads.JoinTest do
postgres_changes: postgres_changes
} = config
- assert %Broadcast{ack: false, self: false} = broadcast
+ assert %Broadcast{ack: false, self: false, replay: replay} = broadcast
assert %Presence{enabled: true, key: ^key} = presence
+ assert %Replay{since: 1, limit: 10} = replay
assert [
%PostgresChange{event: "INSERT", schema: "public", table: "users", filter: "id=eq.1"},
@@ -56,6 +58,25 @@ defmodule RealtimeWeb.Channels.Payloads.JoinTest do
assert is_binary(key)
end
+ test "presence key can be number" do
+ config = %{"config" => %{"presence" => %{"enabled" => true, "key" => 123}}}
+
+ assert {:ok, %Join{config: %Config{presence: %Presence{key: key}}}} = Join.validate(config)
+
+ assert key == 123
+ end
+
+ test "invalid replay" do
+ config = %{"config" => %{"broadcast" => %{"replay" => 123}}}
+
+ assert {
+ :error,
+ :invalid_join_payload,
+ %{config: %{broadcast: %{replay: ["unable to parse, expected a map"]}}}
+ } =
+ Join.validate(config)
+ end
+
test "missing enabled presence defaults to true" do
config = %{"config" => %{"presence" => %{}}}
@@ -92,5 +113,11 @@ defmodule RealtimeWeb.Channels.Payloads.JoinTest do
user_token: ["unable to parse, expected string"]
}
end
+
+ test "handles postgres changes with nil value in array as empty array" do
+ config = %{"config" => %{"postgres_changes" => [nil]}}
+
+ assert {:ok, %Join{config: %Config{postgres_changes: []}}} = Join.validate(config)
+ end
end
end
diff --git a/test/realtime_web/channels/realtime_channel/broadcast_handler_test.exs b/test/realtime_web/channels/realtime_channel/broadcast_handler_test.exs
index 2cd7005df..b2aa9b90e 100644
--- a/test/realtime_web/channels/realtime_channel/broadcast_handler_test.exs
+++ b/test/realtime_web/channels/realtime_channel/broadcast_handler_test.exs
@@ -1,5 +1,8 @@
defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
- use Realtime.DataCase, async: true
+ use Realtime.DataCase,
+ async: true,
+ parameterize: [%{serializer: Phoenix.Socket.V1.JSONSerializer}, %{serializer: RealtimeWeb.Socket.V2Serializer}]
+
use Mimic
import Generators
@@ -17,26 +20,27 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
setup [:initiate_tenant]
+ @payload %{"a" => "b"}
+
describe "handle/3" do
- test "with write true policy, user is able to send message", %{topic: topic, tenant: tenant, db_conn: db_conn} do
+ test "with write true policy, user is able to send message",
+ %{topic: topic, tenant: tenant, db_conn: db_conn, serializer: serializer} do
socket = socket_fixture(tenant, topic, policies: %Policies{broadcast: %BroadcastPolicies{write: true}})
for _ <- 1..100, reduce: socket do
socket ->
- {:reply, :ok, socket} = BroadcastHandler.handle(%{"a" => "b"}, db_conn, socket)
+ {:reply, :ok, socket} = BroadcastHandler.handle(@payload, db_conn, socket)
socket
end
- Process.sleep(120)
-
for _ <- 1..100 do
topic = "realtime:#{topic}"
assert_receive {:socket_push, :text, data}
- message = data |> IO.iodata_to_binary() |> Jason.decode!()
- assert message == %{"event" => "broadcast", "payload" => %{"a" => "b"}, "ref" => nil, "topic" => topic}
+
+ assert Jason.decode!(data) == message(serializer, topic, @payload)
end
- {:ok, %{avg: avg, bucket: buckets}} = RateCounter.get(Tenants.events_per_second_rate(tenant))
+ {:ok, %{avg: avg, bucket: buckets}} = RateCounterHelper.tick!(Tenants.events_per_second_rate(tenant))
assert Enum.sum(buckets) == 100
assert avg > 0
end
@@ -50,40 +54,37 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
socket
end
- Process.sleep(120)
-
refute_received _any
- {:ok, %{avg: avg}} = RateCounter.get(Tenants.events_per_second_rate(tenant))
+ {:ok, %{avg: avg}} = RateCounterHelper.tick!(Tenants.events_per_second_rate(tenant))
assert avg == 0.0
end
@tag policies: [:authenticated_read_broadcast, :authenticated_write_broadcast]
- test "with nil policy but valid user, is able to send message", %{topic: topic, tenant: tenant, db_conn: db_conn} do
+ test "with nil policy but valid user, is able to send message",
+ %{topic: topic, tenant: tenant, db_conn: db_conn, serializer: serializer} do
socket = socket_fixture(tenant, topic)
for _ <- 1..100, reduce: socket do
socket ->
- {:reply, :ok, socket} = BroadcastHandler.handle(%{"a" => "b"}, db_conn, socket)
+ {:reply, :ok, socket} = BroadcastHandler.handle(@payload, db_conn, socket)
socket
end
- Process.sleep(120)
-
for _ <- 1..100 do
topic = "realtime:#{topic}"
assert_received {:socket_push, :text, data}
- message = data |> IO.iodata_to_binary() |> Jason.decode!()
- assert message == %{"event" => "broadcast", "payload" => %{"a" => "b"}, "ref" => nil, "topic" => topic}
+ assert Jason.decode!(data) == message(serializer, topic, @payload)
end
- {:ok, %{avg: avg, bucket: buckets}} = RateCounter.get(Tenants.events_per_second_rate(tenant))
+ {:ok, %{avg: avg, bucket: buckets}} = RateCounterHelper.tick!(Tenants.events_per_second_rate(tenant))
assert Enum.sum(buckets) == 100
assert avg > 0.0
end
@tag policies: [:authenticated_read_matching_user_sub, :authenticated_write_matching_user_sub], sub: UUID.generate()
- test "with valid sub, is able to send message", %{topic: topic, tenant: tenant, db_conn: db_conn, sub: sub} do
+ test "with valid sub, is able to send message",
+ %{topic: topic, tenant: tenant, db_conn: db_conn, sub: sub, serializer: serializer} do
socket =
socket_fixture(tenant, topic,
policies: %Policies{broadcast: %BroadcastPolicies{write: nil, read: true}},
@@ -92,17 +93,14 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
for _ <- 1..100, reduce: socket do
socket ->
- {:reply, :ok, socket} = BroadcastHandler.handle(%{"a" => "b"}, db_conn, socket)
+ {:reply, :ok, socket} = BroadcastHandler.handle(@payload, db_conn, socket)
socket
end
- Process.sleep(120)
-
for _ <- 1..100 do
topic = "realtime:#{topic}"
assert_received {:socket_push, :text, data}
- message = data |> IO.iodata_to_binary() |> Jason.decode!()
- assert message == %{"event" => "broadcast", "payload" => %{"a" => "b"}, "ref" => nil, "topic" => topic}
+ assert Jason.decode!(data) == message(serializer, topic, @payload)
end
end
@@ -120,13 +118,12 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
socket
end
- Process.sleep(120)
-
- refute_received {:socket_push, :text, _}
+ refute_receive {:socket_push, :text, _}, 120
end
@tag policies: [:read_matching_user_role, :write_matching_user_role], role: "anon"
- test "with valid role, is able to send message", %{topic: topic, tenant: tenant, db_conn: db_conn} do
+ test "with valid role, is able to send message",
+ %{topic: topic, tenant: tenant, db_conn: db_conn, serializer: serializer} do
socket =
socket_fixture(tenant, topic,
policies: %Policies{broadcast: %BroadcastPolicies{write: nil, read: true}},
@@ -135,17 +132,14 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
for _ <- 1..100, reduce: socket do
socket ->
- {:reply, :ok, socket} = BroadcastHandler.handle(%{"a" => "b"}, db_conn, socket)
+ {:reply, :ok, socket} = BroadcastHandler.handle(@payload, db_conn, socket)
socket
end
- Process.sleep(120)
-
for _ <- 1..100 do
topic = "realtime:#{topic}"
assert_received {:socket_push, :text, data}
- message = data |> IO.iodata_to_binary() |> Jason.decode!()
- assert message == %{"event" => "broadcast", "payload" => %{"a" => "b"}, "ref" => nil, "topic" => topic}
+ assert Jason.decode!(data) == message(serializer, topic, @payload)
end
end
@@ -163,9 +157,7 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
socket
end
- Process.sleep(120)
-
- refute_received {:socket_push, :text, _}
+ refute_receive {:socket_push, :text, _}, 120
end
test "with nil policy and invalid user, won't send message", %{topic: topic, tenant: tenant, db_conn: db_conn} do
@@ -177,16 +169,15 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
socket
end
- Process.sleep(120)
-
refute_received _any
- {:ok, %{avg: avg}} = RateCounter.get(Tenants.events_per_second_rate(tenant))
+ {:ok, %{avg: avg}} = RateCounterHelper.tick!(Tenants.events_per_second_rate(tenant))
assert avg == 0.0
end
@tag policies: [:authenticated_read_broadcast, :authenticated_write_broadcast]
- test "validation only runs once on nil and valid policies", %{topic: topic, tenant: tenant, db_conn: db_conn} do
+ test "validation only runs once on nil and valid policies",
+ %{topic: topic, tenant: tenant, db_conn: db_conn, serializer: serializer} do
socket = socket_fixture(tenant, topic)
expect(Authorization, :get_write_authorizations, 1, fn conn, db_conn, auth_context ->
@@ -197,15 +188,14 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
for _ <- 1..100, reduce: socket do
socket ->
- {:reply, :ok, socket} = BroadcastHandler.handle(%{"a" => "b"}, db_conn, socket)
+ {:reply, :ok, socket} = BroadcastHandler.handle(@payload, db_conn, socket)
socket
end
for _ <- 1..100 do
topic = "realtime:#{topic}"
assert_receive {:socket_push, :text, data}
- message = data |> IO.iodata_to_binary() |> Jason.decode!()
- assert message == %{"event" => "broadcast", "payload" => %{"a" => "b"}, "ref" => nil, "topic" => topic}
+ assert Jason.decode!(data) == message(serializer, topic, @payload)
end
end
@@ -222,12 +212,10 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
socket
end
- Process.sleep(100)
-
- refute_received _
+ refute_receive _, 100
end
- test "no ack still sends message", %{topic: topic, tenant: tenant, db_conn: db_conn} do
+ test "no ack still sends message", %{topic: topic, tenant: tenant, db_conn: db_conn, serializer: serializer} do
socket =
socket_fixture(tenant, topic,
policies: %Policies{broadcast: %BroadcastPolicies{write: true}},
@@ -236,7 +224,7 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
for _ <- 1..100, reduce: socket do
socket ->
- {:noreply, socket} = BroadcastHandler.handle(%{"a" => "b"}, db_conn, socket)
+ {:noreply, socket} = BroadcastHandler.handle(@payload, db_conn, socket)
socket
end
@@ -245,56 +233,128 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
for _ <- 1..100 do
topic = "realtime:#{topic}"
assert_received {:socket_push, :text, data}
- message = data |> IO.iodata_to_binary() |> Jason.decode!()
- assert message == %{"event" => "broadcast", "payload" => %{"a" => "b"}, "ref" => nil, "topic" => topic}
+ assert Jason.decode!(data) == message(serializer, topic, @payload)
end
end
- test "public channels are able to send messages", %{topic: topic, tenant: tenant, db_conn: db_conn} do
+ test "public channels are able to send messages",
+ %{topic: topic, tenant: tenant, db_conn: db_conn, serializer: serializer} do
socket = socket_fixture(tenant, topic, private?: false, policies: nil)
for _ <- 1..100, reduce: socket do
socket ->
- {:reply, :ok, socket} = BroadcastHandler.handle(%{"a" => "b"}, db_conn, socket)
+ {:reply, :ok, socket} = BroadcastHandler.handle(@payload, db_conn, socket)
socket
end
- Process.sleep(120)
-
for _ <- 1..100 do
topic = "realtime:#{topic}"
assert_received {:socket_push, :text, data}
- message = data |> IO.iodata_to_binary() |> Jason.decode!()
- assert message == %{"event" => "broadcast", "payload" => %{"a" => "b"}, "ref" => nil, "topic" => topic}
+ assert Jason.decode!(data) == message(serializer, topic, @payload)
end
- {:ok, %{avg: avg, bucket: buckets}} = RateCounter.get(Tenants.events_per_second_rate(tenant))
+ {:ok, %{avg: avg, bucket: buckets}} = RateCounterHelper.tick!(Tenants.events_per_second_rate(tenant))
assert Enum.sum(buckets) == 100
assert avg > 0.0
end
- test "public channels are able to send messages and ack", %{topic: topic, tenant: tenant, db_conn: db_conn} do
+ test "public channels are able to send messages and ack",
+ %{topic: topic, tenant: tenant, db_conn: db_conn, serializer: serializer} do
socket = socket_fixture(tenant, topic, private?: false, policies: nil)
for _ <- 1..100, reduce: socket do
socket ->
- {:reply, :ok, socket} = BroadcastHandler.handle(%{"a" => "b"}, db_conn, socket)
+ {:reply, :ok, socket} = BroadcastHandler.handle(@payload, db_conn, socket)
socket
end
for _ <- 1..100 do
topic = "realtime:#{topic}"
assert_receive {:socket_push, :text, data}
- message = data |> IO.iodata_to_binary() |> Jason.decode!()
- assert message == %{"event" => "broadcast", "payload" => %{"a" => "b"}, "ref" => nil, "topic" => topic}
+ assert Jason.decode!(data) == message(serializer, topic, @payload)
end
- Process.sleep(120)
- {:ok, %{avg: avg, bucket: buckets}} = RateCounter.get(Tenants.events_per_second_rate(tenant))
+ {:ok, %{avg: avg, bucket: buckets}} = RateCounterHelper.tick!(Tenants.events_per_second_rate(tenant))
assert Enum.sum(buckets) == 100
assert avg > 0.0
end
+ test "V2 json UserBroadcastPush", %{topic: topic, tenant: tenant, db_conn: db_conn, serializer: serializer} do
+ socket = socket_fixture(tenant, topic, private?: false, policies: nil)
+
+ user_broadcast_payload = %{"a" => "b"}
+ json_encoded_user_broadcast_payload = Jason.encode!(user_broadcast_payload)
+
+ {:reply, :ok, _socket} =
+ BroadcastHandler.handle({"event123", :json, json_encoded_user_broadcast_payload, %{}}, db_conn, socket)
+
+ topic = "realtime:#{topic}"
+ assert_receive {:socket_push, code, data}
+
+ if serializer == RealtimeWeb.Socket.V2Serializer do
+ assert code == :binary
+
+ assert data ==
+ <<
+ # user broadcast = 4
+ 4::size(8),
+ # topic_size
+ byte_size(topic),
+ # user_event_size
+ byte_size("event123"),
+ # metadata_size
+ 0,
+ # json encoding
+ 1::size(8),
+ topic::binary,
+ "event123"
+ >> <> json_encoded_user_broadcast_payload
+ else
+ assert code == :text
+
+ assert Jason.decode!(data) ==
+ message(serializer, topic, %{
+ "event" => "event123",
+ "payload" => user_broadcast_payload,
+ "type" => "broadcast"
+ })
+ end
+ end
+
+ test "V2 binary UserBroadcastPush", %{topic: topic, tenant: tenant, db_conn: db_conn, serializer: serializer} do
+ socket = socket_fixture(tenant, topic, private?: false, policies: nil)
+
+      user_broadcast_payload = <<123, 200, 21>>
+
+ {:reply, :ok, _socket} =
+ BroadcastHandler.handle({"event123", :binary, user_broadcast_payload, %{}}, db_conn, socket)
+
+ topic = "realtime:#{topic}"
+
+ if serializer == RealtimeWeb.Socket.V2Serializer do
+ assert_receive {:socket_push, :binary, data}
+
+ assert data ==
+ <<
+ # user broadcast = 4
+ 4::size(8),
+ # topic_size
+ byte_size(topic),
+ # user_event_size
+ byte_size("event123"),
+ # metadata_size
+ 0,
+ # binary encoding
+ 0::size(8),
+ topic::binary,
+ "event123"
+ >> <> user_broadcast_payload
+ else
+ # Can't receive binary payloads on V1 serializer
+ refute_receive {:socket_push, _code, _data}
+ end
+ end
+
@tag policies: [:broken_write_presence]
test "handle failing rls policy", %{topic: topic, tenant: tenant, db_conn: db_conn} do
socket = socket_fixture(tenant, topic)
@@ -303,14 +363,81 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
capture_log(fn ->
{:noreply, _socket} = BroadcastHandler.handle(%{}, db_conn, socket)
- # Enough for the RateCounter to calculate the last bucket
- refute_received _, 1200
+ {:ok, %{avg: avg}} = RateCounterHelper.tick!(Tenants.events_per_second_rate(tenant))
+ assert avg == 0.0
+
+ refute_receive _, 200
end)
assert log =~ "RlsPolicyError"
+ end
- {:ok, %{avg: avg}} = RateCounter.get(Tenants.events_per_second_rate(tenant))
- assert avg == 0.0
+ test "handle payload size exceeding limits in private channels", %{topic: topic, tenant: tenant, db_conn: db_conn} do
+ socket =
+ socket_fixture(tenant, topic,
+ policies: %Policies{broadcast: %BroadcastPolicies{write: true}},
+ ack_broadcast: false
+ )
+
+ assert {:noreply, _} =
+ BroadcastHandler.handle(
+ %{"data" => random_string(tenant.max_payload_size_in_kb * 1000 + 1)},
+ db_conn,
+ socket
+ )
+
+ refute_receive {:socket_push, :text, _}, 120
+ end
+
+ test "handle payload size exceeding limits in public channels", %{topic: topic, tenant: tenant, db_conn: db_conn} do
+ socket = socket_fixture(tenant, topic, ack_broadcast: false, private?: false)
+
+ assert {:noreply, _} =
+ BroadcastHandler.handle(
+ %{"data" => random_string(tenant.max_payload_size_in_kb * 1000 + 1)},
+ db_conn,
+ socket
+ )
+
+ refute_receive {:socket_push, :text, _}, 120
+ end
+
+ test "handle payload size exceeding limits in private channel and if ack it will receive error", %{
+ topic: topic,
+ tenant: tenant,
+ db_conn: db_conn
+ } do
+ socket =
+ socket_fixture(tenant, topic,
+ policies: %Policies{broadcast: %BroadcastPolicies{write: true}},
+ ack_broadcast: true
+ )
+
+ assert {:reply, {:error, :payload_size_exceeded}, _} =
+ BroadcastHandler.handle(
+ %{"data" => random_string(tenant.max_payload_size_in_kb * 1000 + 1)},
+ db_conn,
+ socket
+ )
+
+ refute_receive {:socket_push, :text, _}, 120
+ end
+
+ test "handle payload size exceeding limits in public channels and if ack it will receive error", %{
+ topic: topic,
+ tenant: tenant,
+ db_conn: db_conn
+ } do
+ socket = socket_fixture(tenant, topic, ack_broadcast: true, private?: false)
+
+ assert {:reply, {:error, :payload_size_exceeded}, _} =
+ BroadcastHandler.handle(
+ %{"data" => random_string(tenant.max_payload_size_in_kb * 1000 + 1)},
+ db_conn,
+ socket
+ )
+
+ refute_receive {:socket_push, :text, _}, 120
end
end
@@ -318,7 +445,7 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
tenant = Containers.checkout_tenant(run_migrations: true)
# Warm cache to avoid Cachex and Ecto.Sandbox ownership issues
- Cachex.put!(Realtime.Tenants.Cache, {{:get_tenant_by_external_id, 1}, [tenant.external_id]}, {:cached, tenant})
+ Realtime.Tenants.Cache.update_cache(tenant)
rate = Tenants.events_per_second_rate(tenant)
RateCounter.new(rate, tick: 100)
@@ -331,7 +458,7 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
fastlane =
RealtimeWeb.RealtimeChannel.MessageDispatcher.fastlane_metadata(
self(),
- Phoenix.Socket.V1.JSONSerializer,
+ context.serializer,
"realtime:#{topic}",
:warning,
"tenant_id"
@@ -389,4 +516,10 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
}
}
end
+
+ defp message(RealtimeWeb.Socket.V2Serializer, topic, payload), do: [nil, nil, topic, "broadcast", payload]
+
+ defp message(Phoenix.Socket.V1.JSONSerializer, topic, payload) do
+ %{"event" => "broadcast", "payload" => payload, "ref" => nil, "topic" => topic}
+ end
end
diff --git a/test/realtime_web/channels/realtime_channel/logging_test.exs b/test/realtime_web/channels/realtime_channel/logging_test.exs
index 92634daef..cd131d16e 100644
--- a/test/realtime_web/channels/realtime_channel/logging_test.exs
+++ b/test/realtime_web/channels/realtime_channel/logging_test.exs
@@ -37,6 +37,7 @@ defmodule RealtimeWeb.RealtimeChannel.LoggingTest do
assert log =~ "sub=#{sub}"
assert log =~ "exp=#{exp}"
assert log =~ "iss=#{iss}"
+ assert log =~ "error_code=TestError"
end
end
@@ -57,6 +58,7 @@ defmodule RealtimeWeb.RealtimeChannel.LoggingTest do
assert log =~ "sub=#{sub}"
assert log =~ "exp=#{exp}"
assert log =~ "iss=#{iss}"
+ assert log =~ "error_code=TestWarning"
end
end
@@ -67,10 +69,14 @@ defmodule RealtimeWeb.RealtimeChannel.LoggingTest do
for log_level <- log_levels do
socket = %{assigns: %{log_level: log_level, tenant: random_string(), access_token: "test_token"}}
- assert capture_log(fn ->
- assert Logging.maybe_log_error(socket, "TestCode", "test message") ==
- {:error, %{reason: "TestCode: test message"}}
- end) =~ "TestCode: test message"
+ log =
+ capture_log(fn ->
+ assert Logging.maybe_log_error(socket, "TestCode", "test message") ==
+ {:error, %{reason: "TestCode: test message"}}
+ end)
+
+ assert log =~ "TestCode: test message"
+ assert log =~ "error_code=TestCode"
assert capture_log(fn ->
assert Logging.maybe_log_error(socket, "TestCode", %{a: "b"}) ==
@@ -103,11 +109,14 @@ defmodule RealtimeWeb.RealtimeChannel.LoggingTest do
for log_level <- log_levels do
socket = %{assigns: %{log_level: log_level, tenant: random_string(), access_token: "test_token"}}
- assert capture_log(fn ->
- assert Logging.maybe_log_warning(socket, "TestCode", "test message") ==
- {:error, %{reason: "TestCode: test message"}}
- end) =~
- "TestCode: test message"
+ log =
+ capture_log(fn ->
+ assert Logging.maybe_log_warning(socket, "TestCode", "test message") ==
+ {:error, %{reason: "TestCode: test message"}}
+ end)
+
+ assert log =~ "TestCode: test message"
+ assert log =~ "error_code=TestCode"
assert capture_log(fn ->
assert Logging.maybe_log_warning(socket, "TestCode", %{a: "b"}) ==
diff --git a/test/realtime_web/channels/realtime_channel/message_dispatcher_test.exs b/test/realtime_web/channels/realtime_channel/message_dispatcher_test.exs
index 7a9e2eb25..834cf7ad8 100644
--- a/test/realtime_web/channels/realtime_channel/message_dispatcher_test.exs
+++ b/test/realtime_web/channels/realtime_channel/message_dispatcher_test.exs
@@ -4,7 +4,10 @@ defmodule RealtimeWeb.RealtimeChannel.MessageDispatcherTest do
import ExUnit.CaptureLog
alias Phoenix.Socket.Broadcast
+ alias Phoenix.Socket.V1
alias RealtimeWeb.RealtimeChannel.MessageDispatcher
+ alias RealtimeWeb.Socket.UserBroadcast
+ alias RealtimeWeb.Socket.V2Serializer
defmodule TestSerializer do
def fastlane!(msg) do
@@ -16,18 +19,35 @@ defmodule RealtimeWeb.RealtimeChannel.MessageDispatcherTest do
describe "fastlane_metadata/5" do
test "info level" do
assert MessageDispatcher.fastlane_metadata(self(), Serializer, "realtime:topic", :info, "tenant_id") ==
- {:realtime_channel_fastlane, self(), Serializer, "realtime:topic", {:log, "tenant_id"}}
+ {:rc_fastlane, self(), Serializer, "realtime:topic", :info, "tenant_id", MapSet.new()}
end
test "non-info level" do
assert MessageDispatcher.fastlane_metadata(self(), Serializer, "realtime:topic", :warning, "tenant_id") ==
- {:realtime_channel_fastlane, self(), Serializer, "realtime:topic"}
+ {:rc_fastlane, self(), Serializer, "realtime:topic", :warning, "tenant_id", MapSet.new()}
+ end
+
+ test "replayed message ids" do
+ assert MessageDispatcher.fastlane_metadata(
+ self(),
+ Serializer,
+ "realtime:topic",
+ :warning,
+ "tenant_id",
+ MapSet.new([1])
+ ) ==
+ {:rc_fastlane, self(), Serializer, "realtime:topic", :warning, "tenant_id", MapSet.new([1])}
end
end
describe "dispatch/3" do
setup do
- {:ok, _pid} = Agent.start_link(fn -> 0 end, name: TestSerializer)
+ {:ok, _pid} =
+ start_supervised(%{
+ id: TestSerializer,
+ start: {Agent, :start_link, [fn -> 0 end, [name: TestSerializer]]}
+ })
+
:ok
end
@@ -50,12 +70,11 @@ defmodule RealtimeWeb.RealtimeChannel.MessageDispatcherTest do
from_pid = :erlang.list_to_pid(~c'<0.2.1>')
subscribers = [
- {subscriber_pid, {:realtime_channel_fastlane, self(), TestSerializer, "realtime:topic", {:log, "tenant123"}}},
- {subscriber_pid, {:realtime_channel_fastlane, self(), TestSerializer, "realtime:topic"}}
+ {subscriber_pid, {:rc_fastlane, self(), TestSerializer, "realtime:topic", :info, "tenant123", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), TestSerializer, "realtime:topic", :warning, "tenant123", MapSet.new()}}
]
msg = %Broadcast{topic: "some:other:topic", event: "event", payload: %{data: "test"}}
- require Logger
log =
capture_log(fn ->
@@ -75,6 +94,130 @@ defmodule RealtimeWeb.RealtimeChannel.MessageDispatcherTest do
refute_receive _any
end
+ test "dispatches 'presence_diff' messages to fastlane subscribers" do
+ parent = self()
+
+ subscriber_pid =
+ spawn(fn ->
+ loop = fn loop ->
+ receive do
+ msg ->
+ send(parent, {:subscriber, msg})
+ loop.(loop)
+ end
+ end
+
+ loop.(loop)
+ end)
+
+ from_pid = :erlang.list_to_pid(~c'<0.2.1>')
+
+ subscribers = [
+ {subscriber_pid, {:rc_fastlane, self(), TestSerializer, "realtime:topic", :info, "tenant456", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), TestSerializer, "realtime:topic", :warning, "tenant456", MapSet.new()}}
+ ]
+
+ msg = %Broadcast{topic: "some:other:topic", event: "presence_diff", payload: %{data: "test"}}
+
+ log =
+ capture_log(fn ->
+ assert MessageDispatcher.dispatch(subscribers, from_pid, msg) == :ok
+ end)
+
+ assert log =~ "Received message on realtime:topic with payload: #{inspect(msg, pretty: true)}"
+
+ assert_receive {:encoded, %Broadcast{event: "presence_diff", payload: %{data: "test"}, topic: "realtime:topic"}}
+ assert_receive {:encoded, %Broadcast{event: "presence_diff", payload: %{data: "test"}, topic: "realtime:topic"}}
+
+ assert Agent.get(TestSerializer, & &1) == 1
+
+ assert Realtime.GenCounter.get(Realtime.Tenants.presence_events_per_second_key("tenant456")) == 2
+
+ refute_receive _any
+ end
+
+ test "does not dispatch messages to fastlane subscribers if they already replayed it" do
+ parent = self()
+
+ subscriber_pid =
+ spawn(fn ->
+ loop = fn loop ->
+ receive do
+ msg ->
+ send(parent, {:subscriber, msg})
+ loop.(loop)
+ end
+ end
+
+ loop.(loop)
+ end)
+
+ from_pid = :erlang.list_to_pid(~c'<0.2.1>')
+ replayed_message_ids = MapSet.new(["123"])
+
+ subscribers = [
+ {subscriber_pid,
+ {:rc_fastlane, self(), TestSerializer, "realtime:topic", :info, "tenant123", replayed_message_ids}},
+ {subscriber_pid,
+ {:rc_fastlane, self(), TestSerializer, "realtime:topic", :warning, "tenant123", replayed_message_ids}}
+ ]
+
+ msg = %Broadcast{
+ topic: "some:other:topic",
+ event: "event",
+ payload: %{"data" => "test", "meta" => %{"id" => "123"}}
+ }
+
+ assert MessageDispatcher.dispatch(subscribers, from_pid, msg) == :ok
+
+ assert Agent.get(TestSerializer, & &1) == 0
+
+ refute_receive _any
+ end
+
+ test "payload is not a map" do
+ parent = self()
+
+ subscriber_pid =
+ spawn(fn ->
+ loop = fn loop ->
+ receive do
+ msg ->
+ send(parent, {:subscriber, msg})
+ loop.(loop)
+ end
+ end
+
+ loop.(loop)
+ end)
+
+ from_pid = :erlang.list_to_pid(~c'<0.2.1>')
+
+ subscribers = [
+ {subscriber_pid, {:rc_fastlane, self(), TestSerializer, "realtime:topic", :info, "tenant123", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), TestSerializer, "realtime:topic", :warning, "tenant123", MapSet.new()}}
+ ]
+
+ msg = %Broadcast{topic: "some:other:topic", event: "event", payload: "not a map"}
+
+ log =
+ capture_log(fn ->
+ assert MessageDispatcher.dispatch(subscribers, from_pid, msg) == :ok
+ end)
+
+ assert log =~ "Received message on realtime:topic with payload: #{inspect(msg, pretty: true)}"
+
+ assert_receive {:encoded, %Broadcast{event: "event", payload: "not a map", topic: "realtime:topic"}}
+ assert_receive {:encoded, %Broadcast{event: "event", payload: "not a map", topic: "realtime:topic"}}
+
+ assert Agent.get(TestSerializer, & &1) == 1
+
+ assert_receive {:subscriber, :update_rate_counter}
+ assert_receive {:subscriber, :update_rate_counter}
+
+ refute_receive _any
+ end
+
test "dispatches messages to non fastlane subscribers" do
from_pid = :erlang.list_to_pid(~c'<0.2.1>')
@@ -93,5 +236,236 @@ defmodule RealtimeWeb.RealtimeChannel.MessageDispatcherTest do
# TestSerializer is not called
assert Agent.get(TestSerializer, & &1) == 0
end
+
+ test "dispatches Broadcast to V1 & V2 Serializers" do
+ parent = self()
+
+ subscriber_pid =
+ spawn(fn ->
+ loop = fn loop ->
+ receive do
+ msg ->
+ send(parent, {:subscriber, msg})
+ loop.(loop)
+ end
+ end
+
+ loop.(loop)
+ end)
+
+ from_pid = :erlang.list_to_pid(~c'<0.2.1>')
+
+ subscribers = [
+ {subscriber_pid, {:rc_fastlane, self(), V1.JSONSerializer, "realtime:topic", :info, "tenant123", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), V1.JSONSerializer, "realtime:topic", :info, "tenant123", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), V2Serializer, "realtime:topic", :info, "tenant123", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), V2Serializer, "realtime:topic", :info, "tenant123", MapSet.new()}}
+ ]
+
+ msg = %Broadcast{topic: "some:other:topic", event: "event", payload: %{data: "test"}}
+
+ log =
+ capture_log(fn ->
+ assert MessageDispatcher.dispatch(subscribers, from_pid, msg) == :ok
+ end)
+
+ assert log =~ "Received message on realtime:topic with payload: #{inspect(msg, pretty: true)}"
+
+ # Receive 2 messages using V1
+ assert_receive {:socket_push, :text, message_v1}
+ assert_receive {:socket_push, :text, ^message_v1}
+
+ assert Jason.decode!(message_v1) == %{
+ "event" => "event",
+ "payload" => %{"data" => "test"},
+ "ref" => nil,
+ "topic" => "realtime:topic"
+ }
+
+ # Receive 2 messages using V2
+ assert_receive {:socket_push, :text, message_v2}
+ assert_receive {:socket_push, :text, ^message_v2}
+
+ # V2 is an array format
+ assert Jason.decode!(message_v2) == [nil, nil, "realtime:topic", "event", %{"data" => "test"}]
+
+ assert_receive {:subscriber, :update_rate_counter}
+ assert_receive {:subscriber, :update_rate_counter}
+ assert_receive {:subscriber, :update_rate_counter}
+ assert_receive {:subscriber, :update_rate_counter}
+
+ refute_receive _any
+ end
+
+ test "dispatches json UserBroadcast to V1 & V2 Serializers" do
+ parent = self()
+
+ subscriber_pid =
+ spawn(fn ->
+ loop = fn loop ->
+ receive do
+ msg ->
+ send(parent, {:subscriber, msg})
+ loop.(loop)
+ end
+ end
+
+ loop.(loop)
+ end)
+
+ from_pid = :erlang.list_to_pid(~c'<0.2.1>')
+
+ subscribers = [
+ {subscriber_pid, {:rc_fastlane, self(), V1.JSONSerializer, "realtime:topic", :info, "tenant123", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), V1.JSONSerializer, "realtime:topic", :info, "tenant123", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), V2Serializer, "realtime:topic", :info, "tenant123", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), V2Serializer, "realtime:topic", :info, "tenant123", MapSet.new()}}
+ ]
+
+ user_payload = Jason.encode!(%{data: "test"})
+
+ msg = %UserBroadcast{
+ topic: "some:other:topic",
+ user_event: "event123",
+ user_payload: user_payload,
+ user_payload_encoding: :json,
+ metadata: %{"id" => "123", "replayed" => true}
+ }
+
+ log =
+ capture_log(fn ->
+ assert MessageDispatcher.dispatch(subscribers, from_pid, msg) == :ok
+ end)
+
+ assert log =~ "Received message on realtime:topic with payload: #{inspect(msg, pretty: true)}"
+
+ # Receive 2 messages using V1
+ assert_receive {:socket_push, :text, message_v1}
+ assert_receive {:socket_push, :text, ^message_v1}
+
+ assert Jason.decode!(message_v1) == %{
+ "event" => "broadcast",
+ "payload" => %{
+ "event" => "event123",
+ "meta" => %{"id" => "123", "replayed" => true},
+ "payload" => %{"data" => "test"},
+ "type" => "broadcast"
+ },
+ "ref" => nil,
+ "topic" => "realtime:topic"
+ }
+
+ # Receive 2 messages using V2
+ assert_receive {:socket_push, :binary, message_v2}
+ assert_receive {:socket_push, :binary, ^message_v2}
+
+ encoded_metadata = Jason.encode!(%{"id" => "123", "replayed" => true})
+ metadata_size = byte_size(encoded_metadata)
+
+ # binary payload structure
+ assert message_v2 ==
+ <<
+ # user broadcast = 4
+ 4::size(8),
+ # topic_size
+ 14,
+ # user_event_size
+ 8,
+ # metadata_size
+ metadata_size,
+ # json encoding
+ 1::size(8),
+ "realtime:topic",
+ "event123"
+ >> <> encoded_metadata <> user_payload
+
+ assert_receive {:subscriber, :update_rate_counter}
+ assert_receive {:subscriber, :update_rate_counter}
+ assert_receive {:subscriber, :update_rate_counter}
+ assert_receive {:subscriber, :update_rate_counter}
+
+ refute_receive _any
+ end
+
+ test "dispatches binary UserBroadcast to V1 & V2 Serializers" do
+ parent = self()
+
+ subscriber_pid =
+ spawn(fn ->
+ loop = fn loop ->
+ receive do
+ msg ->
+ send(parent, {:subscriber, msg})
+ loop.(loop)
+ end
+ end
+
+ loop.(loop)
+ end)
+
+ from_pid = :erlang.list_to_pid(~c'<0.2.1>')
+
+ subscribers = [
+ {subscriber_pid, {:rc_fastlane, self(), V1.JSONSerializer, "realtime:topic", :info, "tenant123", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), V1.JSONSerializer, "realtime:topic", :info, "tenant123", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), V2Serializer, "realtime:topic", :info, "tenant123", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), V2Serializer, "realtime:topic", :info, "tenant123", MapSet.new()}}
+ ]
+
+ user_payload = <<123, 456, 789>>
+
+ msg = %UserBroadcast{
+ topic: "some:other:topic",
+ user_event: "event123",
+ user_payload: user_payload,
+ user_payload_encoding: :binary,
+ metadata: %{"id" => "123", "replayed" => true}
+ }
+
+ log =
+ capture_log(fn ->
+ assert MessageDispatcher.dispatch(subscribers, from_pid, msg) == :ok
+ end)
+
+ assert log =~ "Received message on realtime:topic with payload: #{inspect(msg, pretty: true)}"
+ assert log =~ "User payload encoding is not JSON"
+
+ # Only prints once
+ assert String.split(log, "User payload encoding is not JSON") |> length() == 2
+
+ # No V1 message received as binary payloads are not supported
+ refute_receive {:socket_push, :text, _message_v1}
+
+ # Receive 2 messages using V2
+ assert_receive {:socket_push, :binary, message_v2}
+ assert_receive {:socket_push, :binary, ^message_v2}
+
+ encoded_metadata = Jason.encode!(%{"id" => "123", "replayed" => true})
+ metadata_size = byte_size(encoded_metadata)
+
+ # binary payload structure
+ assert message_v2 ==
+ <<
+ # user broadcast = 4
+ 4::size(8),
+ # topic_size
+ 14,
+ # user_event_size
+ 8,
+ # metadata_size
+ metadata_size,
+ # binary encoding
+ 0::size(8),
+ "realtime:topic",
+ "event123"
+ >> <> encoded_metadata <> user_payload
+
+ assert_receive {:subscriber, :update_rate_counter}
+ assert_receive {:subscriber, :update_rate_counter}
+ assert_receive {:subscriber, :update_rate_counter}
+ assert_receive {:subscriber, :update_rate_counter}
+
+ refute_receive _any
+ end
end
end
diff --git a/test/realtime_web/channels/realtime_channel/presence_handler_test.exs b/test/realtime_web/channels/realtime_channel/presence_handler_test.exs
index e5ecd32ad..1ef635838 100644
--- a/test/realtime_web/channels/realtime_channel/presence_handler_test.exs
+++ b/test/realtime_web/channels/realtime_channel/presence_handler_test.exs
@@ -99,26 +99,42 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
end
end
- describe "handle/2" do
+ describe "handle/3" do
+ setup %{tenant: tenant} do
+ on_exit(fn -> :telemetry.detach(__MODULE__) end)
+
+ :telemetry.attach(
+ __MODULE__,
+ [:realtime, :tenants, :payload, :size],
+ &__MODULE__.handle_telemetry/4,
+ %{pid: self(), tenant: tenant}
+ )
+ end
+
test "with true policy and is private, user can track their presence and changes", %{
tenant: tenant,
topic: topic,
db_conn: db_conn
} do
+ external_id = tenant.external_id
key = random_string()
policies = %Policies{presence: %PresencePolicies{read: true, write: true}}
socket =
socket_fixture(tenant, topic, key, policies: policies)
- PresenceHandler.handle(%{"event" => "track"}, db_conn, socket)
+ PresenceHandler.handle(%{"event" => "track", "payload" => %{"A" => "b", "c" => "b"}}, db_conn, socket)
topic = socket.assigns.tenant_topic
assert_receive %Broadcast{topic: ^topic, event: "presence_diff", payload: %{joins: joins, leaves: %{}}}
assert Map.has_key?(joins, key)
+
+ assert_receive {:telemetry, [:realtime, :tenants, :payload, :size], %{size: 30},
+ %{tenant: ^external_id, message_type: :presence}}
end
test "when tracking already existing user, metadata updated", %{tenant: tenant, topic: topic, db_conn: db_conn} do
+ external_id = tenant.external_id
key = random_string()
policies = %Policies{presence: %PresencePolicies{read: true, write: true}}
socket = socket_fixture(tenant, topic, key, policies: policies)
@@ -134,19 +150,87 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
assert_receive %Broadcast{topic: ^topic, event: "presence_diff", payload: %{joins: joins, leaves: %{}}}
assert Map.has_key?(joins, key)
- refute_receive :_
+
+ assert_receive {:telemetry, [:realtime, :tenants, :payload, :size], %{size: 6},
+ %{tenant: ^external_id, message_type: :presence}}
+
+ assert_receive {:telemetry, [:realtime, :tenants, :payload, :size], %{size: 55},
+ %{tenant: ^external_id, message_type: :presence}}
+
+ refute_receive _
+ end
+
+ test "tracking the same payload does nothing", %{tenant: tenant, topic: topic, db_conn: db_conn} do
+ external_id = tenant.external_id
+ key = random_string()
+ policies = %Policies{presence: %PresencePolicies{read: true, write: true}}
+ socket = socket_fixture(tenant, topic, key, policies: policies)
+
+ assert {:ok, socket} = PresenceHandler.handle(%{"event" => "track", "payload" => %{"a" => "b"}}, db_conn, socket)
+
+ assert_receive {:telemetry, [:realtime, :tenants, :payload, :size], %{size: 18},
+ %{tenant: ^external_id, message_type: :presence}}
+
+ topic = socket.assigns.tenant_topic
+ assert_receive %Broadcast{topic: ^topic, event: "presence_diff", payload: %{joins: joins, leaves: %{}}}
+ assert Map.has_key?(joins, key)
+
+ assert {:ok, _socket} =
+ PresenceHandler.handle(%{"event" => "track", "payload" => %{"a" => "b"}}, db_conn, socket)
+
+ refute_receive _
+ end
+
+ test "tracking, untracking and then tracking the same payload emit events", context do
+ %{tenant: tenant, topic: topic, db_conn: db_conn} = context
+ external_id = tenant.external_id
+ key = random_string()
+ policies = %Policies{presence: %PresencePolicies{read: true, write: true}}
+ socket = socket_fixture(tenant, topic, key, policies: policies)
+
+ assert {:ok, socket} = PresenceHandler.handle(%{"event" => "track", "payload" => %{"a" => "b"}}, db_conn, socket)
+ assert socket.assigns.presence_track_payload == %{"a" => "b"}
+
+ assert_receive {:telemetry, [:realtime, :tenants, :payload, :size], %{size: 18},
+ %{tenant: ^external_id, message_type: :presence}}
+
+ topic = socket.assigns.tenant_topic
+ assert_receive %Broadcast{topic: ^topic, event: "presence_diff", payload: %{joins: joins, leaves: %{}}}
+ assert %{^key => %{metas: [%{:phx_ref => _, "a" => "b"}]}} = joins
+
+ assert {:ok, socket} = PresenceHandler.handle(%{"event" => "untrack"}, db_conn, socket)
+ assert socket.assigns.presence_track_payload == nil
+
+ assert_receive %Broadcast{topic: ^topic, event: "presence_diff", payload: %{joins: %{}, leaves: leaves}}
+ assert %{^key => %{metas: [%{:phx_ref => _, "a" => "b"}]}} = leaves
+
+ assert {:ok, socket} = PresenceHandler.handle(%{"event" => "track", "payload" => %{"a" => "b"}}, db_conn, socket)
+
+ assert socket.assigns.presence_track_payload == %{"a" => "b"}
+
+ assert_receive %Broadcast{topic: ^topic, event: "presence_diff", payload: %{joins: joins, leaves: %{}}}
+ assert %{^key => %{metas: [%{:phx_ref => _, "a" => "b"}]}} = joins
+
+ assert_receive {:telemetry, [:realtime, :tenants, :payload, :size], %{size: 18},
+ %{tenant: ^external_id, message_type: :presence}}
+
+ refute_receive _
end
test "with false policy and is public, user can track their presence and changes", %{tenant: tenant, topic: topic} do
+ external_id = tenant.external_id
key = random_string()
policies = %Policies{presence: %PresencePolicies{read: false, write: false}}
socket = socket_fixture(tenant, topic, key, policies: policies, private?: false)
- assert {:ok, _socket} = PresenceHandler.handle(%{"event" => "track"}, socket)
+ assert {:ok, _socket} = PresenceHandler.handle(%{"event" => "track"}, nil, socket)
topic = socket.assigns.tenant_topic
assert_receive %Broadcast{topic: ^topic, event: "presence_diff", payload: %{joins: joins, leaves: %{}}}
assert Map.has_key?(joins, key)
+
+ assert_receive {:telemetry, [:realtime, :tenants, :payload, :size], %{size: 6},
+ %{tenant: ^external_id, message_type: :presence}}
end
test "user can untrack when they want", %{tenant: tenant, topic: topic, db_conn: db_conn} do
@@ -229,6 +313,7 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
assert {:ok, socket} =
PresenceHandler.handle(
%{"event" => "track", "payload" => %{"metadata" => random_string()}},
+ nil,
socket
)
@@ -248,7 +333,7 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
assert log =~ "UnknownPresenceEvent"
end
- test "socket with presence enabled false will ignore presence events in public channel", %{
+ test "socket with presence enabled false will ignore non-track presence events in public channel", %{
tenant: tenant,
topic: topic
} do
@@ -256,12 +341,12 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
policies = %Policies{presence: %PresencePolicies{read: true, write: true}}
socket = socket_fixture(tenant, topic, key, policies: policies, private?: false, enabled?: false)
- assert {:ok, _socket} = PresenceHandler.handle(%{"event" => "track"}, socket)
+ assert {:ok, _socket} = PresenceHandler.handle(%{"event" => "untrack"}, nil, socket)
topic = socket.assigns.tenant_topic
refute_receive %Broadcast{topic: ^topic, event: "presence_diff"}
end
- test "socket with presence enabled false will ignore presence events in private channel", %{
+ test "socket with presence enabled false will ignore non-track presence events in private channel", %{
tenant: tenant,
topic: topic,
db_conn: db_conn
@@ -270,11 +355,80 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
policies = %Policies{presence: %PresencePolicies{read: true, write: true}}
socket = socket_fixture(tenant, topic, key, policies: policies, private?: false, enabled?: false)
- assert {:ok, _socket} = PresenceHandler.handle(%{"event" => "track"}, db_conn, socket)
+ assert {:ok, _socket} = PresenceHandler.handle(%{"event" => "untrack"}, db_conn, socket)
topic = socket.assigns.tenant_topic
refute_receive %Broadcast{topic: ^topic, event: "presence_diff"}
end
+ test "socket with presence disabled will enable presence on track message for public channel", %{
+ tenant: tenant,
+ topic: topic
+ } do
+ key = random_string()
+ policies = %Policies{presence: %PresencePolicies{read: true, write: true}}
+ socket = socket_fixture(tenant, topic, key, policies: policies, private?: false, enabled?: false)
+
+ refute socket.assigns.presence_enabled?
+
+ assert {:ok, updated_socket} = PresenceHandler.handle(%{"event" => "track"}, nil, socket)
+
+ assert updated_socket.assigns.presence_enabled?
+ topic = socket.assigns.tenant_topic
+ assert_receive %Broadcast{topic: ^topic, event: "presence_diff", payload: %{joins: joins, leaves: %{}}}
+ assert Map.has_key?(joins, key)
+ end
+
+ test "socket with presence disabled will enable presence on track message for private channel", %{
+ tenant: tenant,
+ topic: topic,
+ db_conn: db_conn
+ } do
+ key = random_string()
+ policies = %Policies{presence: %PresencePolicies{read: true, write: true}}
+ socket = socket_fixture(tenant, topic, key, policies: policies, private?: true, enabled?: false)
+
+ refute socket.assigns.presence_enabled?
+
+ assert {:ok, updated_socket} = PresenceHandler.handle(%{"event" => "track"}, db_conn, socket)
+
+ assert updated_socket.assigns.presence_enabled?
+ topic = socket.assigns.tenant_topic
+ assert_receive %Broadcast{topic: ^topic, event: "presence_diff", payload: %{joins: joins, leaves: %{}}}
+ assert Map.has_key?(joins, key)
+ end
+
+ test "socket with presence disabled will not enable presence on untrack message", %{
+ tenant: tenant,
+ topic: topic,
+ db_conn: db_conn
+ } do
+ key = random_string()
+ policies = %Policies{presence: %PresencePolicies{read: true, write: true}}
+ socket = socket_fixture(tenant, topic, key, policies: policies, enabled?: false)
+
+ refute socket.assigns.presence_enabled?
+
+ assert {:ok, updated_socket} = PresenceHandler.handle(%{"event" => "untrack"}, db_conn, socket)
+
+ refute updated_socket.assigns.presence_enabled?
+ topic = socket.assigns.tenant_topic
+ refute_receive %Broadcast{topic: ^topic, event: "presence_diff"}
+ end
+
+ test "socket with presence disabled will not enable presence on unknown event", %{
+ tenant: tenant,
+ topic: topic,
+ db_conn: db_conn
+ } do
+ key = random_string()
+ policies = %Policies{presence: %PresencePolicies{read: true, write: true}}
+ socket = socket_fixture(tenant, topic, key, policies: policies, enabled?: false)
+
+ refute socket.assigns.presence_enabled?
+
+ assert {:error, :unknown_presence_event} = PresenceHandler.handle(%{"event" => "unknown"}, db_conn, socket)
+ end
+
@tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
test "rate limit is checked on private channel", %{tenant: tenant, topic: topic, db_conn: db_conn} do
key = random_string()
@@ -284,7 +438,8 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
log =
capture_log(fn ->
for _ <- 1..300, do: PresenceHandler.handle(%{"event" => "track"}, db_conn, socket)
- Process.sleep(1100)
+
+ {:ok, _} = RateCounterHelper.tick!(Tenants.presence_events_per_second_rate(tenant))
assert {:error, :rate_limit_exceeded} = PresenceHandler.handle(%{"event" => "track"}, db_conn, socket)
end)
@@ -299,13 +454,25 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
log =
capture_log(fn ->
for _ <- 1..300, do: PresenceHandler.handle(%{"event" => "track"}, db_conn, socket)
- Process.sleep(1100)
+
+ {:ok, _} = RateCounterHelper.tick!(Tenants.presence_events_per_second_rate(tenant))
assert {:error, :rate_limit_exceeded} = PresenceHandler.handle(%{"event" => "track"}, db_conn, socket)
end)
assert log =~ "PresenceRateLimitReached"
end
+
+ test "fails on high payload size", %{tenant: tenant, topic: topic, db_conn: db_conn} do
+ key = random_string()
+ socket = socket_fixture(tenant, topic, key, private?: false)
+ payload_size = tenant.max_payload_size_in_kb * 1000
+
+ payload = %{content: random_string(payload_size)}
+
+ assert {:error, :payload_size_exceeded} =
+ PresenceHandler.handle(%{"event" => "track", "payload" => payload}, db_conn, socket)
+ end
end
describe "sync/1" do
@@ -356,7 +523,8 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
log =
capture_log(fn ->
for _ <- 1..300, do: PresenceHandler.handle(%{"event" => "track"}, db_conn, socket)
- Process.sleep(1100)
+
+ {:ok, _} = RateCounterHelper.tick!(Tenants.presence_events_per_second_rate(tenant))
assert {:error, :rate_limit_exceeded} = PresenceHandler.handle(%{"event" => "track"}, db_conn, socket)
end)
@@ -372,7 +540,8 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
log =
capture_log(fn ->
for _ <- 1..300, do: PresenceHandler.handle(%{"event" => "track"}, db_conn, socket)
- Process.sleep(1100)
+
+ {:ok, _} = RateCounterHelper.tick!(Tenants.presence_events_per_second_rate(tenant))
assert {:error, :rate_limit_exceeded} = PresenceHandler.handle(%{"event" => "track"}, db_conn, socket)
end)
@@ -384,7 +553,7 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
defp initiate_tenant(context) do
tenant = Containers.checkout_tenant(run_migrations: true)
# Warm cache to avoid Cachex and Ecto.Sandbox ownership issues
- Cachex.put!(Realtime.Tenants.Cache, {{:get_tenant_by_external_id, 1}, [tenant.external_id]}, {:cached, tenant})
+ Realtime.Tenants.Cache.update_cache(tenant)
{:ok, db_conn} = Connect.lookup_or_start_connection(tenant.external_id)
assert Connect.ready?(tenant.external_id)
@@ -447,4 +616,10 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
}
}
end
+
+ def handle_telemetry(event, measures, metadata, %{pid: pid, tenant: tenant}) do
+ if metadata[:tenant] == tenant.external_id do
+ send(pid, {:telemetry, event, measures, metadata})
+ end
+ end
end
diff --git a/test/realtime_web/channels/realtime_channel/tracker_test.exs b/test/realtime_web/channels/realtime_channel/tracker_test.exs
index 2590b9597..7137256c1 100644
--- a/test/realtime_web/channels/realtime_channel/tracker_test.exs
+++ b/test/realtime_web/channels/realtime_channel/tracker_test.exs
@@ -1,5 +1,7 @@
defmodule RealtimeWeb.RealtimeChannel.TrackerTest do
- use Realtime.DataCase
+ # The Tracker kills websockets when no channels are open,
+ # which can affect other tests running concurrently
+ use Realtime.DataCase, async: false
alias RealtimeWeb.RealtimeChannel.Tracker
setup do
diff --git a/test/realtime_web/channels/realtime_channel_test.exs b/test/realtime_web/channels/realtime_channel_test.exs
index 2dff83da3..c92e8779a 100644
--- a/test/realtime_web/channels/realtime_channel_test.exs
+++ b/test/realtime_web/channels/realtime_channel_test.exs
@@ -1,12 +1,11 @@
defmodule RealtimeWeb.RealtimeChannelTest do
- # Can't run async true because under the hood Cachex is used and it doesn't see Ecto Sandbox
- use RealtimeWeb.ChannelCase, async: false
+ use RealtimeWeb.ChannelCase, async: true
use Mimic
import ExUnit.CaptureLog
- alias Phoenix.Socket
alias Phoenix.Channel.Server
+ alias Phoenix.Socket
alias Realtime.Tenants.Authorization
alias Realtime.Tenants.Connect
@@ -23,39 +22,540 @@ defmodule RealtimeWeb.RealtimeChannelTest do
setup do
tenant = Containers.checkout_tenant(run_migrations: true)
+ Realtime.Tenants.Cache.update_cache(tenant)
{:ok, tenant: tenant}
end
setup :rls_context
- describe "presence" do
- test "events are counted", %{tenant: tenant} do
+ describe "process flags" do
+ test "max heap size is set for both transport and channel processes", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{}, conn_opts(tenant, jwt))
+
+ assert Process.info(socket.transport_pid, :max_heap_size) ==
+ {:max_heap_size, %{error_logger: true, include_shared_binaries: false, kill: true, size: 6_250_000}}
+
+ assert {:ok, _, socket} = subscribe_and_join(socket, "realtime:test", %{})
+
+ assert Process.info(socket.channel_pid, :max_heap_size) ==
+ {:max_heap_size, %{error_logger: true, include_shared_binaries: false, kill: true, size: 6_250_000}}
+ end
+
+ # We don't test the socket because in unit tests Phoenix does not set the fullsweep_after config
+ test "fullsweep_after is set on channel process", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{}, conn_opts(tenant, jwt))
+
+ assert {:ok, _, socket} = subscribe_and_join(socket, "realtime:test", %{})
+
+ assert Process.info(socket.channel_pid, :fullsweep_after) == {:fullsweep_after, 20}
+ end
+ end
+
+ describe "postgres changes" do
+ test "subscribes to inserts", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{}, conn_opts(tenant, jwt))
+
+ config = %{
+ "presence" => %{"enabled" => false},
+ "postgres_changes" => [%{"event" => "INSERT", "schema" => "public", "table" => "test"}]
+ }
+
+ assert {:ok, reply, _socket} = subscribe_and_join(socket, "realtime:test", %{"config" => config})
+
+ assert %{postgres_changes: [%{:id => sub_id, "event" => "INSERT", "schema" => "public", "table" => "test"}]} =
+ reply
+
+ assert_push "system",
+ %{message: "Subscribed to PostgreSQL", status: "ok", extension: "postgres_changes", channel: "test"},
+ 5000
+
+ {:ok, conn} = Connect.lookup_or_start_connection(tenant.external_id)
+ %{rows: [[id]]} = Postgrex.query!(conn, "insert into test (details) values ('test') returning id", [])
+
+ assert_push "postgres_changes", %{data: data, ids: [^sub_id]}, 500
+
+ # we encode and decode because the data is a Jason.Fragment
+ assert %{
+ "table" => "test",
+ "type" => "INSERT",
+ "record" => %{"details" => "test", "id" => ^id},
+ "columns" => [%{"name" => "id", "type" => "int4"}, %{"name" => "details", "type" => "text"}],
+ "errors" => nil,
+ "schema" => "public",
+ "commit_timestamp" => _
+ } = Jason.encode!(data) |> Jason.decode!()
+
+ refute_receive %Socket.Message{}
+ refute_receive %Socket.Reply{}
+ end
+
+ test "multiple subscriptions", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{}, conn_opts(tenant, jwt))
+
+ config = %{
+ "presence" => %{"enabled" => false},
+ "postgres_changes" => [
+ %{"event" => "INSERT", "schema" => "public", "table" => "test"},
+ %{"event" => "DELETE", "schema" => "public", "table" => "test"}
+ ]
+ }
+
+ assert {:ok, reply, _socket} = subscribe_and_join(socket, "realtime:test", %{"config" => config})
+
+ assert %{
+ postgres_changes: [
+ %{:id => sub_id, "event" => "INSERT", "schema" => "public", "table" => "test"},
+ %{
+ :id => 4_845_530,
+ "event" => "DELETE",
+ "schema" => "public",
+ "table" => "test"
+ }
+ ]
+ } =
+ reply
+
+ assert_push "system",
+ %{message: "Subscribed to PostgreSQL", status: "ok", extension: "postgres_changes", channel: "test"},
+ 5000
+
+ {:ok, conn} = Connect.lookup_or_start_connection(tenant.external_id)
+ %{rows: [[id]]} = Postgrex.query!(conn, "insert into test (details) values ('test') returning id", [])
+
+ assert_push "postgres_changes", %{data: data, ids: [4_845_530, ^sub_id]}, 500
+
+ # we encode and decode because the data is a Jason.Fragment
+ assert %{
+ "table" => "test",
+ "type" => "INSERT",
+ "record" => %{"details" => "test", "id" => ^id},
+ "columns" => [%{"name" => "id", "type" => "int4"}, %{"name" => "details", "type" => "text"}],
+ "errors" => nil,
+ "schema" => "public",
+ "commit_timestamp" => _
+ } = Jason.encode!(data) |> Jason.decode!()
+
+ refute_receive %Socket.Message{}
+ refute_receive %Socket.Reply{}
+ end
+
+ test "malformed subscription params", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{}, conn_opts(tenant, jwt))
+
+ config = %{
+ "presence" => %{"enabled" => false},
+ "postgres_changes" => [%{"event" => "*", "schema" => "public", "table" => "test", "filter" => "wrong"}]
+ }
+
+ assert {:ok, reply, socket} = subscribe_and_join(socket, "realtime:test", %{"config" => config})
+
+ assert %{postgres_changes: [%{"event" => "*", "schema" => "public", "table" => "test"}]} = reply
+
+ assert_push "system",
+ %{
+ message: "Error parsing `filter` params: [\"wrong\"]",
+ status: "error",
+ extension: "postgres_changes",
+ channel: "test"
+ },
+ 3000
+
+ socket = Server.socket(socket.channel_pid)
+
+ # It won't re-subscribe
+ assert socket.assigns.pg_sub_ref == nil
+ end
+
+ test "invalid subscription table does not exist", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{}, conn_opts(tenant, jwt))
+
+ config = %{
+ "presence" => %{"enabled" => false},
+ "postgres_changes" => [%{"event" => "*", "schema" => "public", "table" => "doesnotexist"}]
+ }
+
+ assert {:ok, reply, socket} = subscribe_and_join(socket, "realtime:test", %{"config" => config})
+
+ assert %{postgres_changes: [%{"event" => "*", "schema" => "public", "table" => "doesnotexist"}]} = reply
+
+ assert_push "system",
+ %{
+ message:
+ "Unable to subscribe to changes with given parameters. Please check Realtime is enabled for the given connect parameters: [schema: public, table: doesnotexist, filters: []]",
+ status: "error",
+ extension: "postgres_changes",
+ channel: "test"
+ },
+ 5000
+
+ socket = Server.socket(socket.channel_pid)
+
+ # It won't re-subscribe
+ assert socket.assigns.pg_sub_ref == nil
+ end
+
+ test "invalid subscription column does not exist", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{}, conn_opts(tenant, jwt))
+
+ config = %{
+ "presence" => %{"enabled" => false},
+ "postgres_changes" => [
+ %{"event" => "*", "schema" => "public", "table" => "test", "filter" => "notacolumn=eq.123"}
+ ]
+ }
+
+ assert {:ok, reply, socket} = subscribe_and_join(socket, "realtime:test", %{"config" => config})
+
+ assert %{postgres_changes: [%{"event" => "*", "schema" => "public", "table" => "test"}]} = reply
+
+ assert_push "system",
+ %{
+ message:
+ "Unable to subscribe to changes with given parameters. An exception happened so please check your connect parameters: [schema: public, table: test, filters: [{\"notacolumn\", \"eq\", \"123\"}]]. Exception: ERROR P0001 (raise_exception) invalid column for filter notacolumn",
+ status: "error",
+ extension: "postgres_changes",
+ channel: "test"
+ },
+ 5000
+
+ socket = Server.socket(socket.channel_pid)
+
+ # It won't re-subscribe
+ assert socket.assigns.pg_sub_ref == nil
+ end
+
+ test "connection error", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{}, conn_opts(tenant, jwt))
+
+ config = %{
+ "presence" => %{"enabled" => false},
+ "postgres_changes" => [%{"event" => "*", "schema" => "public", "table" => "test"}]
+ }
+
+ conn = spawn(fn -> :ok end)
+ # Set the subscription manager conn to a pid that is already dead
+
+ assert {:ok, reply, socket} = subscribe_and_join(socket, "realtime:test", %{"config" => config})
+
+ assert %{postgres_changes: [%{"event" => "*", "schema" => "public", "table" => "test"}]} = reply
+
+ assert_push "system",
+ %{
+ message: "Subscribed to PostgreSQL",
+ status: "ok",
+ extension: "postgres_changes",
+ channel: "test"
+ },
+ 5000
+
+ {:ok, manager_pid, _conn} = Extensions.PostgresCdcRls.get_manager_conn(tenant.external_id)
+ Extensions.PostgresCdcRls.update_meta(tenant.external_id, manager_pid, conn)
+
+ assert {:ok, _reply, socket} = subscribe_and_join(socket, "realtime:test_fail", %{"config" => config})
+
+ assert_push "system",
+ %{message: message, status: "error", extension: "postgres_changes", channel: "test_fail"},
+ 5000
+
+ assert message =~ "{:error, \"Too many database timeouts\"}"
+ socket = Server.socket(socket.channel_pid)
+
+ # It will try again in the future
+ assert socket.assigns.pg_sub_ref != nil
+ end
+ end
+
+ describe "broadcast" do
+ @describetag policies: [:authenticated_all_topic_read]
+
+ test "broadcast map payload", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{}, conn_opts(tenant, jwt))
+
+ config = %{
+ "presence" => %{"enabled" => false},
+ "broadcast" => %{"self" => true}
+ }
+
+ assert {:ok, _, socket} = subscribe_and_join(socket, "realtime:test", %{"config" => config})
+
+ push(socket, "broadcast", %{"event" => "my_event", "payload" => %{"hello" => "world"}})
+
+ assert_receive %Phoenix.Socket.Message{
+ topic: "realtime:test",
+ event: "broadcast",
+ payload: %{"event" => "my_event", "payload" => %{"hello" => "world"}}
+ }
+ end
+
+ test "broadcast non-map payload", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{}, conn_opts(tenant, jwt))
+
+ config = %{
+ "presence" => %{"enabled" => false},
+ "broadcast" => %{"self" => true}
+ }
+
+ assert {:ok, _, socket} = subscribe_and_join(socket, "realtime:test", %{"config" => config})
+
+ push(socket, "broadcast", "not a map")
+
+ assert_receive %Phoenix.Socket.Message{
+ topic: "realtime:test",
+ event: "broadcast",
+ payload: "not a map"
+ }
+ end
+
+ test "wrong replay params", %{tenant: tenant} do
jwt = Generators.generate_jwt_token(tenant)
{:ok, %Socket{} = socket} = connect(UserSocket, %{"log_level" => "warning"}, conn_opts(tenant, jwt))
- assert {:ok, _, %Socket{} = socket} = subscribe_and_join(socket, "realtime:test", %{})
+ config = %{
+ "private" => true,
+ "broadcast" => %{
+ "replay" => %{"limit" => "not a number", "since" => :erlang.system_time(:millisecond) - 5 * 60000}
+ }
+ }
- presence_diff = %Socket.Broadcast{event: "presence_diff", payload: %{joins: %{}, leaves: %{}}}
- send(socket.channel_pid, presence_diff)
+ assert {:error, %{reason: "UnableToReplayMessages: Replay params are not valid"}} =
+ subscribe_and_join(socket, "realtime:test", %{"config" => config})
- assert_receive %Socket.Message{topic: "realtime:test", event: "presence_state", payload: %{}}
+ config = %{
+ "private" => true,
+ "broadcast" => %{
+ "replay" => %{"limit" => 1, "since" => "not a number"}
+ }
+ }
+
+ assert {:error, %{reason: "UnableToReplayMessages: Replay params are not valid"}} =
+ subscribe_and_join(socket, "realtime:test", %{"config" => config})
+
+ config = %{
+ "private" => true,
+ "broadcast" => %{
+ "replay" => %{}
+ }
+ }
+
+ assert {:error, %{reason: "UnableToReplayMessages: Replay params are not valid"}} =
+ subscribe_and_join(socket, "realtime:test", %{"config" => config})
+ end
+
+ test "failure to replay", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{"log_level" => "warning"}, conn_opts(tenant, jwt))
+
+ config = %{
+ "private" => true,
+ "broadcast" => %{
+ "replay" => %{"limit" => 12, "since" => :erlang.system_time(:millisecond) - 5 * 60000}
+ }
+ }
+
+ Authorization
+ |> expect(:get_read_authorizations, fn _, _, _ ->
+ {:ok,
+ %Authorization.Policies{
+ broadcast: %Authorization.Policies.BroadcastPolicies{read: true, write: nil}
+ }}
+ end)
+
+ # Broken database connection
+ conn = spawn(fn -> :ok end)
+ Connect.lookup_or_start_connection(tenant.external_id)
+ {:ok, _} = :syn.update_registry(Connect, tenant.external_id, fn _pid, meta -> %{meta | conn: conn} end)
+
+ assert {:error, %{reason: "UnableToReplayMessages: Realtime was unable to replay messages"}} =
+ subscribe_and_join(socket, "realtime:test", %{"config" => config})
+ end
+
+ test "replay messages on public topic not allowed", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{"log_level" => "warning"}, conn_opts(tenant, jwt))
+
+ config = %{
+ "presence" => %{"enabled" => false},
+ "broadcast" => %{"replay" => %{"limit" => 2, "since" => :erlang.system_time(:millisecond) - 5 * 60000}}
+ }
+
+ assert {
+ :error,
+ %{reason: "UnableToReplayMessages: Replay is not allowed for public channels"}
+ } = subscribe_and_join(socket, "realtime:test", %{"config" => config})
+
+ refute_receive %Socket.Message{}
+ refute_receive %Socket.Reply{}
+ end
+
+ @tag policies: [:authenticated_all_topic_read]
+ test "replay messages on private topic", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{"log_level" => "warning"}, conn_opts(tenant, jwt))
+
+ # Old message
+ message_fixture(tenant, %{
+ "private" => true,
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-1, :day),
+ "event" => "old",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "payload" => %{"value" => "old"}
+ })
+
+ %{id: message1_id} =
+ message_fixture(tenant, %{
+ "private" => true,
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-1, :minute),
+ "event" => "first",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "payload" => %{"value" => "first"}
+ })
+
+ %{id: message2_id} =
+ message_fixture(tenant, %{
+ "private" => true,
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-2, :minute),
+ "event" => "second",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "payload" => %{"value" => "second"}
+ })
+
+ # This one should not be received because of the limit
+ message_fixture(tenant, %{
+ "private" => true,
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-3, :minute),
+ "event" => "third",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "payload" => %{"value" => "third"}
+ })
+
+ config = %{
+ "private" => true,
+ "presence" => %{"enabled" => false},
+ "broadcast" => %{"replay" => %{"limit" => 2, "since" => :erlang.system_time(:millisecond) - 5 * 60000}}
+ }
+
+ assert {:ok, _, %Socket{}} = subscribe_and_join(socket, "realtime:test", %{"config" => config})
assert_receive %Socket.Message{
topic: "realtime:test",
- event: "presence_diff",
- payload: %{joins: %{}, leaves: %{}}
+ event: "broadcast",
+ payload: %{
+ "event" => "first",
+ "meta" => %{"id" => ^message1_id, "replayed" => true},
+ "payload" => %{"value" => "first"},
+ "type" => "broadcast"
+ }
}
- tenant_id = tenant.external_id
+ assert_receive %Socket.Message{
+ topic: "realtime:test",
+ event: "broadcast",
+ payload: %{
+ "event" => "second",
+ "meta" => %{"id" => ^message2_id, "replayed" => true},
+ "payload" => %{"value" => "second"},
+ "type" => "broadcast"
+ }
+ }
+
+ refute_receive %Socket.Message{}
+ end
+ end
+
+ describe "presence" do
+ test "presence state event is counted", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{"log_level" => "warning"}, conn_opts(tenant, jwt))
- # Wait for RateCounter to tick
- Process.sleep(1100)
+ assert {:ok, _, %Socket{} = socket} = subscribe_and_join(socket, "realtime:test", %{})
+
+ assert_receive %Socket.Message{topic: "realtime:test", event: "presence_state", payload: %{}}
+
+ tenant_id = tenant.external_id
assert {:ok, %RateCounter{id: {:channel, :presence_events, ^tenant_id}, bucket: bucket}} =
- RateCounter.get(socket.assigns.presence_rate_counter)
+ RateCounterHelper.tick!(socket.assigns.presence_rate_counter)
+
+ # presence_state
+ assert Enum.sum(bucket) == 1
+ end
+
+ test "presence track closes on high payload size", %{tenant: tenant} do
+ topic = "realtime:test"
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{"log_level" => "warning"}, conn_opts(tenant, jwt))
+
+ assert {:ok, _, %Socket{} = socket} = subscribe_and_join(socket, topic, %{})
+
+ assert_receive %Phoenix.Socket.Message{topic: "realtime:test", event: "presence_state"}, 500
+
+ payload = %{
+ type: "presence",
+ event: "TRACK",
+ payload: %{name: "realtime_presence_96", t: 1814.7000000029802, content: String.duplicate("a", 3_500_000)}
+ }
+
+ push(socket, "presence", payload)
+
+ assert_receive %Phoenix.Socket.Message{
+ event: "system",
+ payload: %{
+ extension: "system",
+ message: "Track message size exceeded",
+ status: "error"
+ },
+ topic: ^topic
+ },
+ 500
+ end
+
+ test "presence track with same payload does nothing", %{tenant: tenant} do
+ topic = "realtime:test"
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{"log_level" => "warning"}, conn_opts(tenant, jwt))
+
+ assert {:ok, _, %Socket{} = socket} =
+ subscribe_and_join(socket, topic, %{config: %{presence: %{enabled: true, key: "my_key"}}})
- # presence_state + presence_diff
- assert 2 in bucket
+ assert_receive %Phoenix.Socket.Message{topic: "realtime:test", event: "presence_state"}, 500
+
+ payload = %{type: "presence", event: "TRACK", payload: %{"hello" => "world"}}
+
+ push(socket, "presence", payload)
+
+ assert_receive %Socket.Reply{payload: %{}, topic: "realtime:test", status: :ok}, 500
+
+ assert_receive %Socket.Message{
+ payload: %{
+ joins: %{"my_key" => %{metas: [%{:phx_ref => _, "hello" => "world"}]}},
+ leaves: %{}
+ },
+ topic: "realtime:test",
+ event: "presence_diff"
+ },
+ 500
+
+ push(socket, "presence", payload)
+
+ assert_receive %Socket.Reply{payload: %{}, topic: "realtime:test", status: :ok}, 500
+ # no presence_diff this time
+
+ refute_receive %Socket.Message{}
+ refute_receive %Socket.Reply{}
end
end
@@ -762,7 +1262,10 @@ defmodule RealtimeWeb.RealtimeChannelTest do
put_in(extension, ["settings", "db_port"], db_port)
]
- Realtime.Api.update_tenant(tenant, %{extensions: extensions})
+ with {:ok, tenant} <- Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{extensions: extensions}) do
+ Realtime.Tenants.Cache.update_cache(tenant)
+ {:ok, tenant}
+ end
end
defp assert_process_down(pid) do
diff --git a/test/realtime_web/channels/tenant_rate_limiters_test.exs b/test/realtime_web/channels/tenant_rate_limiters_test.exs
new file mode 100644
index 000000000..05d56ec82
--- /dev/null
+++ b/test/realtime_web/channels/tenant_rate_limiters_test.exs
@@ -0,0 +1,31 @@
+defmodule RealtimeWeb.TenantRateLimitersTest do
+ use Realtime.DataCase, async: true
+
+ use Mimic
+ alias RealtimeWeb.TenantRateLimiters
+ alias Realtime.Api.Tenant
+
+ setup do
+ tenant = %Tenant{external_id: random_string(), max_concurrent_users: 1, max_joins_per_second: 1}
+
+ %{tenant: tenant}
+ end
+
+ describe "check_tenant/1" do
+ test "rate is not exceeded", %{tenant: tenant} do
+ assert TenantRateLimiters.check_tenant(tenant) == :ok
+ end
+
+ test "max concurrent users is exceeded", %{tenant: tenant} do
+ Realtime.UsersCounter.add(self(), tenant.external_id)
+
+ assert TenantRateLimiters.check_tenant(tenant) == {:error, :too_many_connections}
+ end
+
+ test "max joins is exceeded", %{tenant: tenant} do
+ expect(Realtime.RateCounter, :get, fn _ -> {:ok, %{limit: %{triggered: true}}} end)
+
+ assert TenantRateLimiters.check_tenant(tenant) == {:error, :too_many_joins}
+ end
+ end
+end
diff --git a/test/realtime_web/controllers/broadcast_controller_test.exs b/test/realtime_web/controllers/broadcast_controller_test.exs
index 9c38d58bd..73ab4148e 100644
--- a/test/realtime_web/controllers/broadcast_controller_test.exs
+++ b/test/realtime_web/controllers/broadcast_controller_test.exs
@@ -18,7 +18,7 @@ defmodule RealtimeWeb.BroadcastControllerTest do
setup %{conn: conn} do
tenant = Containers.checkout_tenant(run_migrations: true)
# Warm cache to avoid Cachex and Ecto.Sandbox ownership issues
- Cachex.put!(Realtime.Tenants.Cache, {{:get_tenant_by_external_id, 1}, [tenant.external_id]}, {:cached, tenant})
+ Realtime.Tenants.Cache.update_cache(tenant)
conn = generate_conn(conn, tenant)
@@ -141,16 +141,38 @@ defmodule RealtimeWeb.BroadcastControllerTest do
assert conn.status == 422
- # Wait for counters to increment. RateCounter tick is 1 second
- Process.sleep(2000)
- {:ok, rate_counter} = RateCounter.get(Tenants.requests_per_second_rate(tenant))
+ {:ok, rate_counter} = RateCounterHelper.tick!(Tenants.requests_per_second_rate(tenant))
assert rate_counter.avg != 0.0
- {:ok, rate_counter} = RateCounter.get(Tenants.events_per_second_rate(tenant))
+ {:ok, rate_counter} = RateCounterHelper.tick!(Tenants.events_per_second_rate(tenant))
assert rate_counter.avg == 0.0
refute_receive {:socket_push, _, _}
end
+
+ test "returns 422 when batch of messages includes a message that exceeds the tenant payload size", %{
+ conn: conn,
+ tenant: tenant
+ } do
+ sub_topic_1 = "sub_topic_1"
+ sub_topic_2 = "sub_topic_2"
+
+ payload_1 = %{"data" => "data"}
+ payload_2 = %{"data" => random_string(tenant.max_payload_size_in_kb * 1000 + 100)}
+ event_1 = "event_1"
+ event_2 = "event_2"
+
+ conn =
+ post(conn, Routes.broadcast_path(conn, :broadcast), %{
+ "messages" => [
+ %{"topic" => sub_topic_1, "payload" => payload_1, "event" => event_1},
+ %{"topic" => sub_topic_1, "payload" => payload_1, "event" => event_1},
+ %{"topic" => sub_topic_2, "payload" => payload_2, "event" => event_2}
+ ]
+ })
+
+ assert conn.status == 422
+ end
end
describe "too many requests" do
@@ -272,7 +294,7 @@ defmodule RealtimeWeb.BroadcastControllerTest do
} do
request_events_key = Tenants.requests_per_second_key(tenant)
broadcast_events_key = Tenants.events_per_second_key(tenant)
- expect(TenantBroadcaster, :pubsub_broadcast, 5, fn _, _, _, _ -> :ok end)
+ expect(TenantBroadcaster, :pubsub_broadcast, 5, fn _, _, _, _, _ -> :ok end)
messages_to_send =
Stream.repeatedly(fn -> generate_message_with_policies(db_conn, tenant) end)
@@ -294,7 +316,7 @@ defmodule RealtimeWeb.BroadcastControllerTest do
conn = post(conn, Routes.broadcast_path(conn, :broadcast), %{"messages" => messages})
- broadcast_calls = calls(&TenantBroadcaster.pubsub_broadcast/4)
+ broadcast_calls = calls(&TenantBroadcaster.pubsub_broadcast/5)
Enum.each(messages_to_send, fn %{topic: topic} ->
broadcast_topic = Tenants.tenant_topic(tenant, topic, false)
@@ -310,7 +332,7 @@ defmodule RealtimeWeb.BroadcastControllerTest do
}
assert Enum.any?(broadcast_calls, fn
- [_, ^broadcast_topic, ^message, RealtimeChannel.MessageDispatcher] -> true
+ [_, ^broadcast_topic, ^message, RealtimeChannel.MessageDispatcher, :broadcast] -> true
_ -> false
end)
end)
@@ -326,7 +348,7 @@ defmodule RealtimeWeb.BroadcastControllerTest do
} do
request_events_key = Tenants.requests_per_second_key(tenant)
broadcast_events_key = Tenants.events_per_second_key(tenant)
- expect(TenantBroadcaster, :pubsub_broadcast, 6, fn _, _, _, _ -> :ok end)
+ expect(TenantBroadcaster, :pubsub_broadcast, 6, fn _, _, _, _, _ -> :ok end)
channels =
Stream.repeatedly(fn -> generate_message_with_policies(db_conn, tenant) end)
@@ -358,7 +380,7 @@ defmodule RealtimeWeb.BroadcastControllerTest do
conn = post(conn, Routes.broadcast_path(conn, :broadcast), %{"messages" => messages})
- broadcast_calls = calls(&TenantBroadcaster.pubsub_broadcast/4)
+ broadcast_calls = calls(&TenantBroadcaster.pubsub_broadcast/5)
Enum.each(channels, fn %{topic: topic} ->
broadcast_topic = Tenants.tenant_topic(tenant, topic, false)
@@ -374,7 +396,7 @@ defmodule RealtimeWeb.BroadcastControllerTest do
}
assert Enum.count(broadcast_calls, fn
- [_, ^broadcast_topic, ^message, RealtimeChannel.MessageDispatcher] -> true
+ [_, ^broadcast_topic, ^message, RealtimeChannel.MessageDispatcher, :broadcast] -> true
_ -> false
end) == 1
end)
@@ -393,7 +415,7 @@ defmodule RealtimeWeb.BroadcastControllerTest do
open_channel_topic = Tenants.tenant_topic(tenant, "open_channel", true)
assert Enum.count(broadcast_calls, fn
- [_, ^open_channel_topic, ^message, RealtimeChannel.MessageDispatcher] -> true
+ [_, ^open_channel_topic, ^message, RealtimeChannel.MessageDispatcher, :broadcast] -> true
_ -> false
end) == 1
@@ -408,7 +430,7 @@ defmodule RealtimeWeb.BroadcastControllerTest do
} do
request_events_key = Tenants.requests_per_second_key(tenant)
broadcast_events_key = Tenants.events_per_second_key(tenant)
- expect(TenantBroadcaster, :pubsub_broadcast, 5, fn _, _, _, _ -> :ok end)
+ expect(TenantBroadcaster, :pubsub_broadcast, 5, fn _, _, _, _, _ -> :ok end)
messages_to_send =
Stream.repeatedly(fn -> generate_message_with_policies(db_conn, tenant) end)
@@ -428,11 +450,12 @@ defmodule RealtimeWeb.BroadcastControllerTest do
GenCounter
|> expect(:add, fn ^request_events_key -> :ok end)
- |> expect(:add, length(messages_to_send), fn ^broadcast_events_key -> :ok end)
+ # subtract the one message that won't be broadcast for this user
+ |> expect(:add, length(messages) - 1, fn ^broadcast_events_key -> :ok end)
conn = post(conn, Routes.broadcast_path(conn, :broadcast), %{"messages" => messages})
- broadcast_calls = calls(&TenantBroadcaster.pubsub_broadcast/4)
+ broadcast_calls = calls(&TenantBroadcaster.pubsub_broadcast/5)
Enum.each(messages_to_send, fn %{topic: topic} ->
broadcast_topic = Tenants.tenant_topic(tenant, topic, false)
@@ -448,7 +471,7 @@ defmodule RealtimeWeb.BroadcastControllerTest do
}
assert Enum.count(broadcast_calls, fn
- [_, ^broadcast_topic, ^message, RealtimeChannel.MessageDispatcher] -> true
+ [_, ^broadcast_topic, ^message, RealtimeChannel.MessageDispatcher, :broadcast] -> true
_ -> false
end) == 1
end)
@@ -461,7 +484,7 @@ defmodule RealtimeWeb.BroadcastControllerTest do
@tag role: "anon"
test "user without permission won't broadcast", %{conn: conn, db_conn: db_conn, tenant: tenant} do
request_events_key = Tenants.requests_per_second_key(tenant)
- reject(&TenantBroadcaster.pubsub_broadcast/4)
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
messages =
Stream.repeatedly(fn -> generate_message_with_policies(db_conn, tenant) end)
@@ -482,7 +505,6 @@ defmodule RealtimeWeb.BroadcastControllerTest do
GenCounter
|> expect(:add, fn ^request_events_key -> 1 end)
- |> reject(:add, 1)
conn = post(conn, Routes.broadcast_path(conn, :broadcast), %{"messages" => messages})
diff --git a/test/realtime_web/controllers/metrics_controller_test.exs b/test/realtime_web/controllers/metrics_controller_test.exs
index f16edc83f..52453271c 100644
--- a/test/realtime_web/controllers/metrics_controller_test.exs
+++ b/test/realtime_web/controllers/metrics_controller_test.exs
@@ -2,11 +2,23 @@ defmodule RealtimeWeb.MetricsControllerTest do
# Usage of Clustered
# Also changing Application env
use RealtimeWeb.ConnCase, async: false
+ alias Realtime.GenRpc
import ExUnit.CaptureLog
+ use Mimic
setup_all do
- {:ok, _} = Clustered.start(nil, extra_config: [{:realtime, :region, "ap-southeast-2"}])
+ metrics_tags = %{
+ region: "ap-southeast-2",
+ host: "anothernode@something.com",
+ id: "someid"
+ }
+
+ {:ok, _} =
+ Clustered.start(nil,
+ extra_config: [{:realtime, :region, "ap-southeast-2"}, {:realtime, :metrics_tags, metrics_tags}]
+ )
+
:ok
end
@@ -30,14 +42,18 @@ defmodule RealtimeWeb.MetricsControllerTest do
assert response =~
"# HELP beam_system_schedulers_online_info The number of scheduler threads that are online."
- assert response =~ "region=\"ap-southeast-2"
- assert response =~ "region=\"us-east-1"
+ assert response =~ "region=\"ap-southeast-2\""
+ assert response =~ "region=\"us-east-1\""
end
test "returns 200 and log on timeout", %{conn: conn} do
- current_value = Application.get_env(:realtime, :metrics_rpc_timeout)
- on_exit(fn -> Application.put_env(:realtime, :metrics_rpc_timeout, current_value) end)
- Application.put_env(:realtime, :metrics_rpc_timeout, 0)
+ Mimic.stub(GenRpc, :call, fn node, mod, func, args, opts ->
+ if node != node() do
+ {:error, :rpc_error, :timeout}
+ else
+ call_original(GenRpc, :call, [node, mod, func, args, opts])
+ end
+ end)
log =
capture_log(fn ->
@@ -74,4 +90,64 @@ defmodule RealtimeWeb.MetricsControllerTest do
|> response(403)
end
end
+
+ describe "GET /metrics/:region" do
+ setup %{conn: conn} do
+ # The metrics pipeline requires authentication
+ jwt_secret = Application.fetch_env!(:realtime, :metrics_jwt_secret)
+ token = generate_jwt_token(jwt_secret, %{})
+ authenticated_conn = put_req_header(conn, "authorization", "Bearer #{token}")
+
+ {:ok, conn: authenticated_conn}
+ end
+
+ test "returns 200", %{conn: conn} do
+ assert response =
+ conn
+ |> get(~p"/metrics/ap-southeast-2")
+ |> text_response(200)
+
+ # Check prometheus like metrics
+ assert response =~
+ "# HELP beam_system_schedulers_online_info The number of scheduler threads that are online."
+
+ assert response =~ "region=\"ap-southeast-2\""
+ refute response =~ "region=\"us-east-1\""
+ end
+
+ test "returns 200 and log on timeout", %{conn: conn} do
+ Mimic.stub(GenRpc, :call, fn _node, _mod, _func, _args, _opts ->
+ {:error, :rpc_error, :timeout}
+ end)
+
+ log =
+ capture_log(fn ->
+ assert response =
+ conn
+ |> get(~p"/metrics/ap-southeast-2")
+ |> text_response(200)
+
+ assert response == ""
+ end)
+
+ assert log =~ "Cannot fetch metrics from the node"
+ end
+
+ test "returns 403 when authorization header is missing", %{conn: conn} do
+ assert conn
+ |> delete_req_header("authorization")
+ |> get(~p"/metrics/ap-southeast-2")
+ |> response(403)
+ end
+
+ test "returns 403 when authorization header is wrong", %{conn: conn} do
+ token = generate_jwt_token("bad_secret", %{})
+
+ assert _ =
+ conn
+ |> put_req_header("authorization", "Bearer #{token}")
+ |> get(~p"/metrics/ap-southeast-2")
+ |> response(403)
+ end
+ end
end
diff --git a/test/realtime_web/controllers/tenant_controller_test.exs b/test/realtime_web/controllers/tenant_controller_test.exs
index 3974e7e7b..95c7ab762 100644
--- a/test/realtime_web/controllers/tenant_controller_test.exs
+++ b/test/realtime_web/controllers/tenant_controller_test.exs
@@ -334,8 +334,9 @@ defmodule RealtimeWeb.TenantControllerTest do
setup [:with_tenant]
setup do
+ previous_region = Application.get_env(:realtime, :region)
Application.put_env(:realtime, :region, "us-east-1")
- on_exit(fn -> Application.put_env(:realtime, :region, nil) end)
+ on_exit(fn -> Application.put_env(:realtime, :region, previous_region) end)
end
test "health check when tenant does not exist", %{conn: conn} do
@@ -418,7 +419,7 @@ defmodule RealtimeWeb.TenantControllerTest do
conn = get(conn, ~p"/api/tenants/#{tenant.external_id}/health")
data = json_response(conn, 200)["data"]
- Process.sleep(2000)
+ Process.sleep(1000)
assert {:ok, %{rows: []}} = Postgrex.query(db_conn, "SELECT * FROM realtime.messages", [])
diff --git a/test/realtime_web/live/status_live/index_test.exs b/test/realtime_web/live/status_live/index_test.exs
new file mode 100644
index 000000000..ae3af0ad0
--- /dev/null
+++ b/test/realtime_web/live/status_live/index_test.exs
@@ -0,0 +1,33 @@
+defmodule RealtimeWeb.StatusLive.IndexTest do
+ use RealtimeWeb.ConnCase
+ import Phoenix.LiveViewTest
+
+ alias Realtime.Latency.Payload
+ alias Realtime.Nodes
+ alias RealtimeWeb.Endpoint
+
+ describe "Status LiveView" do
+ test "renders status page", %{conn: conn} do
+ {:ok, _view, html} = live(conn, ~p"/status")
+
+ assert html =~ "Realtime Status"
+ end
+
+ test "receives broadcast from PubSub", %{conn: conn} do
+ {:ok, view, _html} = live(conn, ~p"/status")
+
+ payload = %Payload{
+ from_node: Nodes.short_node_id_from_name(:"pink@127.0.0.1"),
+ node: Nodes.short_node_id_from_name(:"orange@127.0.0.1"),
+ latency: "42ms",
+ timestamp: DateTime.utc_now()
+ }
+
+ Endpoint.broadcast("admin:cluster", "ping", payload)
+
+ html = render(view)
+ assert html =~ "42ms"
+ assert html =~ "pink@127.0.0.1_orange@127.0.0.1"
+ end
+ end
+end
diff --git a/test/realtime_web/plugs/rate_limiter_test.exs b/test/realtime_web/plugs/rate_limiter_test.exs
index 78b22fc8f..1cca58346 100644
--- a/test/realtime_web/plugs/rate_limiter_test.exs
+++ b/test/realtime_web/plugs/rate_limiter_test.exs
@@ -47,9 +47,7 @@ defmodule RealtimeWeb.Plugs.RateLimiterTest do
end
test "serve a 200 when rate limit is set to 100", %{conn: conn} do
- {:ok, _tenant} =
- Api.get_tenant_by_external_id(@tenant["external_id"])
- |> Api.update_tenant(%{"max_events_per_second" => 100})
+ {:ok, _tenant} = Api.update_tenant_by_external_id(@tenant["external_id"], %{"max_events_per_second" => 100})
conn =
conn
diff --git a/test/realtime_web/socket/v2_serializer_test.exs b/test/realtime_web/socket/v2_serializer_test.exs
new file mode 100644
index 000000000..2d83e1ea1
--- /dev/null
+++ b/test/realtime_web/socket/v2_serializer_test.exs
@@ -0,0 +1,553 @@
+defmodule RealtimeWeb.Socket.V2SerializerTest do
+ use ExUnit.Case, async: true
+
+ alias Phoenix.Socket.{Broadcast, Message, Reply}
+ alias RealtimeWeb.Socket.UserBroadcast
+ alias RealtimeWeb.Socket.V2Serializer
+
+ @serializer V2Serializer
+ @v2_fastlane_json "[null,null,\"t\",\"e\",{\"m\":1}]"
+ @v2_msg_json "[null,null,\"t\",\"e\",{\"m\":1}]"
+
+ @client_push <<
+ # push
+ 0::size(8),
+ # join_ref_size
+ 2,
+ # ref_size
+ 3,
+ # topic_size
+ 5,
+ # event_size
+ 5,
+ "12",
+ "123",
+ "topic",
+ "event",
+ 101,
+ 102,
+ 103
+ >>
+
+ @client_binary_user_broadcast_push <<
+ # user broadcast push
+ 3::size(8),
+ # join_ref_size
+ 2,
+ # ref_size
+ 3,
+ # topic_size
+ 5,
+ # user_event_size
+ 10,
+ # metadata_size
+ 0,
+ # binary encoding
+ 0::size(8),
+ "12",
+ "123",
+ "topic",
+ "user_event",
+ 101,
+ 102,
+ 103
+ >>
+
+ @client_json_user_broadcast_push <<
+ # user broadcast push
+ 3::size(8),
+ # join_ref_size
+ 2,
+ # ref_size
+ 3,
+ # topic_size
+ 5,
+ # user_event_size
+ 10,
+ # metadata_size
+ 0,
+ # json encoding
+ 1::size(8),
+ "12",
+ "123",
+ "topic",
+ "user_event",
+ 123,
+ 34,
+ 97,
+ 34,
+ 58,
+ 34,
+ 98,
+ 34,
+ 125
+ >>
+
+ @client_binary_user_broadcast_push_with_metadata <<
+ # user broadcast push
+ 3::size(8),
+ # join_ref_size
+ 2,
+ # ref_size
+ 3,
+ # topic_size
+ 5,
+ # user_event_size
+ 10,
+ # metadata_size
+ 14,
+ # binary encoding
+ 0::size(8),
+ "12",
+ "123",
+ "topic",
+ "user_event",
+ ~s<{"store":true}>,
+ 101,
+ 102,
+ 103
+ >>
+
+ @reply <<
+ # reply
+ 1::size(8),
+ # join_ref_size
+ 2,
+ # ref_size
+ 3,
+ # topic_size
+ 5,
+ # status_size
+ 2,
+ "12",
+ "123",
+ "topic",
+ "ok",
+ 101,
+ 102,
+ 103
+ >>
+
+ @broadcast <<
+ # broadcast
+ 2::size(8),
+ # topic_size
+ 5,
+ # event_size
+ 5,
+ "topic",
+ "event",
+ 101,
+ 102,
+ 103
+ >>
+
+ @binary_user_broadcast <<
+ # user broadcast
+ 4::size(8),
+ # topic_size
+ 5,
+ # user_event_size
+ 10,
+ # metadata_size
+ 17,
+ # binary encoding
+ 0::size(8),
+ "topic",
+ "user_event",
+ # metadata
+ 123,
+ 34,
+ 114,
+ 101,
+ 112,
+ 108,
+ 97,
+ 121,
+ 101,
+ 100,
+ 34,
+ 58,
+ 116,
+ 114,
+ 117,
+ 101,
+ 125,
+ # payload
+ 101,
+ 102,
+ 103
+ >>
+
+ @binary_user_broadcast_no_metadata <<
+ # user broadcast
+ 4::size(8),
+ # topic_size
+ 5,
+ # user_event_size
+ 10,
+ # metadata_size
+ 0,
+ # binary encoding
+ 0::size(8),
+ "topic",
+ "user_event",
+ # metadata
+ # payload
+ 101,
+ 102,
+ 103
+ >>
+
+ @json_user_broadcast <<
+ # user broadcast
+ 4::size(8),
+ # topic_size
+ 5,
+ # user_event_size
+ 10,
+ # metadata_size
+ 17,
+ # json encoding
+ 1::size(8),
+ "topic",
+ "user_event",
+ # metadata
+ 123,
+ 34,
+ 114,
+ 101,
+ 112,
+ 108,
+ 97,
+ 121,
+ 101,
+ 100,
+ 34,
+ 58,
+ 116,
+ 114,
+ 117,
+ 101,
+ 125,
+ # payload
+ 123,
+ 34,
+ 97,
+ 34,
+ 58,
+ 34,
+ 98,
+ 34,
+ 125
+ >>
+
+ @json_user_broadcast_no_metadata <<
+ # broadcast
+ 4::size(8),
+ # topic_size
+ 5,
+ # user_event_size
+ 10,
+ # metadata_size
+ 0,
+ # json encoding
+ 1::size(8),
+ "topic",
+ "user_event",
+ # metadata
+ # payload
+ 123,
+ 34,
+ 97,
+ 34,
+ 58,
+ 34,
+ 98,
+ 34,
+ 125
+ >>
+
+ defp encode!(serializer, msg) do
+ case serializer.encode!(msg) do
+ {:socket_push, :text, encoded} ->
+ assert is_list(encoded)
+ IO.iodata_to_binary(encoded)
+
+ {:socket_push, :binary, encoded} ->
+ assert is_binary(encoded)
+ encoded
+ end
+ end
+
+ defp decode!(serializer, msg, opts), do: serializer.decode!(msg, opts)
+
+ defp fastlane!(serializer, msg) do
+ case serializer.fastlane!(msg) do
+ {:socket_push, :text, encoded} ->
+ assert is_list(encoded)
+ IO.iodata_to_binary(encoded)
+
+ {:socket_push, :binary, encoded} ->
+ assert is_binary(encoded)
+ encoded
+ end
+ end
+
+ test "encode!/1 encodes `Phoenix.Socket.Message` as JSON" do
+ msg = %Message{topic: "t", event: "e", payload: %{m: 1}}
+ assert encode!(@serializer, msg) == @v2_msg_json
+ end
+
+ test "encode!/1 raises when payload is not a map" do
+ msg = %Message{topic: "t", event: "e", payload: "invalid"}
+ assert_raise ArgumentError, fn -> encode!(@serializer, msg) end
+ end
+
+ test "encode!/1 encodes `Phoenix.Socket.Reply` as JSON" do
+ msg = %Reply{topic: "t", payload: %{m: 1}}
+ encoded = encode!(@serializer, msg)
+
+ assert Jason.decode!(encoded) == [
+ nil,
+ nil,
+ "t",
+ "phx_reply",
+ %{"response" => %{"m" => 1}, "status" => nil}
+ ]
+ end
+
+ test "decode!/2 decodes `Phoenix.Socket.Message` from JSON" do
+ assert %Message{topic: "t", event: "e", payload: %{"m" => 1}} ==
+ decode!(@serializer, @v2_msg_json, opcode: :text)
+ end
+
+ test "fastlane!/1 encodes a broadcast into a message as JSON" do
+ msg = %Broadcast{topic: "t", event: "e", payload: %{m: 1}}
+ assert fastlane!(@serializer, msg) == @v2_fastlane_json
+ end
+
+ test "fastlane!/1 raises when payload is not a map" do
+ msg = %Broadcast{topic: "t", event: "e", payload: "invalid"}
+ assert_raise ArgumentError, fn -> fastlane!(@serializer, msg) end
+ end
+
+ describe "binary encode" do
+ test "general pushed message" do
+ push = <<
+ # push
+ 0::size(8),
+ # join_ref_size
+ 2,
+ # topic_size
+ 5,
+ # event_size
+ 5,
+ "12",
+ "topic",
+ "event",
+ 101,
+ 102,
+ 103
+ >>
+
+ assert encode!(@serializer, %Phoenix.Socket.Message{
+ join_ref: "12",
+ ref: nil,
+ topic: "topic",
+ event: "event",
+ payload: {:binary, <<101, 102, 103>>}
+ }) == push
+ end
+
+ test "encode with oversized headers" do
+ assert_raise ArgumentError, ~r/unable to convert topic to binary/, fn ->
+ encode!(@serializer, %Phoenix.Socket.Message{
+ join_ref: "12",
+ ref: nil,
+ topic: String.duplicate("t", 256),
+ event: "event",
+ payload: {:binary, <<101, 102, 103>>}
+ })
+ end
+
+ assert_raise ArgumentError, ~r/unable to convert event to binary/, fn ->
+ encode!(@serializer, %Phoenix.Socket.Message{
+ join_ref: "12",
+ ref: nil,
+ topic: "topic",
+ event: String.duplicate("e", 256),
+ payload: {:binary, <<101, 102, 103>>}
+ })
+ end
+
+ assert_raise ArgumentError, ~r/unable to convert join_ref to binary/, fn ->
+ encode!(@serializer, %Phoenix.Socket.Message{
+ join_ref: String.duplicate("j", 256),
+ ref: nil,
+ topic: "topic",
+ event: "event",
+ payload: {:binary, <<101, 102, 103>>}
+ })
+ end
+ end
+
+ test "reply" do
+ assert encode!(@serializer, %Phoenix.Socket.Reply{
+ join_ref: "12",
+ ref: "123",
+ topic: "topic",
+ status: :ok,
+ payload: {:binary, <<101, 102, 103>>}
+ }) == @reply
+ end
+
+ test "reply with oversized headers" do
+ assert_raise ArgumentError, ~r/unable to convert ref to binary/, fn ->
+ encode!(@serializer, %Phoenix.Socket.Reply{
+ join_ref: "12",
+ ref: String.duplicate("r", 256),
+ topic: "topic",
+ status: :ok,
+ payload: {:binary, <<101, 102, 103>>}
+ })
+ end
+ end
+
+ test "fastlane binary Broadcast" do
+ assert fastlane!(@serializer, %Broadcast{
+ topic: "topic",
+ event: "event",
+ payload: {:binary, <<101, 102, 103>>}
+ }) == @broadcast
+ end
+
+ test "fastlane binary UserBroadcast" do
+ assert fastlane!(@serializer, %UserBroadcast{
+ topic: "topic",
+ user_event: "user_event",
+ metadata: %{"replayed" => true},
+ user_payload_encoding: :binary,
+ user_payload: <<101, 102, 103>>
+ }) == @binary_user_broadcast
+ end
+
+ test "fastlane binary UserBroadcast no metadata" do
+ assert fastlane!(@serializer, %UserBroadcast{
+ topic: "topic",
+ user_event: "user_event",
+ metadata: nil,
+ user_payload_encoding: :binary,
+ user_payload: <<101, 102, 103>>
+ }) == @binary_user_broadcast_no_metadata
+ end
+
+ test "fastlane json UserBroadcast" do
+ assert fastlane!(@serializer, %UserBroadcast{
+ topic: "topic",
+ user_event: "user_event",
+ metadata: %{"replayed" => true},
+ user_payload_encoding: :json,
+ user_payload: "{\"a\":\"b\"}"
+ }) == @json_user_broadcast
+ end
+
+ test "fastlane json UserBroadcast no metadata" do
+ assert fastlane!(@serializer, %UserBroadcast{
+ topic: "topic",
+ user_event: "user_event",
+ user_payload_encoding: :json,
+ user_payload: "{\"a\":\"b\"}"
+ }) == @json_user_broadcast_no_metadata
+ end
+
+ test "fastlane with oversized headers" do
+ assert_raise ArgumentError, ~r/unable to convert topic to binary/, fn ->
+ fastlane!(@serializer, %Broadcast{
+ topic: String.duplicate("t", 256),
+ event: "event",
+ payload: {:binary, <<101, 102, 103>>}
+ })
+ end
+
+ assert_raise ArgumentError, ~r/unable to convert event to binary/, fn ->
+ fastlane!(@serializer, %Broadcast{
+ topic: "topic",
+ event: String.duplicate("e", 256),
+ payload: {:binary, <<101, 102, 103>>}
+ })
+ end
+
+ assert_raise ArgumentError, ~r/unable to convert topic to binary/, fn ->
+ fastlane!(@serializer, %UserBroadcast{
+ topic: String.duplicate("t", 256),
+ user_event: "user_event",
+ user_payload_encoding: :json,
+ user_payload: "{\"a\":\"b\"}"
+ })
+ end
+
+ assert_raise ArgumentError, ~r/unable to convert user_event to binary/, fn ->
+ fastlane!(@serializer, %UserBroadcast{
+ topic: "topic",
+ user_event: String.duplicate("e", 256),
+ user_payload_encoding: :json,
+ user_payload: "{\"a\":\"b\"}"
+ })
+ end
+
+ assert_raise ArgumentError, ~r/unable to convert metadata to binary/, fn ->
+ fastlane!(@serializer, %UserBroadcast{
+ topic: "topic",
+ user_event: "user_event",
+ metadata: %{k: String.duplicate("e", 256)},
+ user_payload_encoding: :json,
+ user_payload: "{\"a\":\"b\"}"
+ })
+ end
+ end
+ end
+
+ describe "binary decode" do
+ test "pushed message" do
+ assert decode!(@serializer, @client_push, opcode: :binary) == %Phoenix.Socket.Message{
+ join_ref: "12",
+ ref: "123",
+ topic: "topic",
+ event: "event",
+ payload: {:binary, <<101, 102, 103>>}
+ }
+ end
+
+ test "binary user pushed message with metadata" do
+ assert decode!(@serializer, @client_binary_user_broadcast_push_with_metadata, opcode: :binary) ==
+ %Phoenix.Socket.Message{
+ join_ref: "12",
+ ref: "123",
+ topic: "topic",
+ event: "broadcast",
+ payload: {"user_event", :binary, <<101, 102, 103>>, %{"store" => true}}
+ }
+ end
+
+ test "binary user pushed message" do
+ assert decode!(@serializer, @client_binary_user_broadcast_push, opcode: :binary) == %Phoenix.Socket.Message{
+ join_ref: "12",
+ ref: "123",
+ topic: "topic",
+ event: "broadcast",
+ payload: {"user_event", :binary, <<101, 102, 103>>, %{}}
+ }
+ end
+
+ test "json binary user pushed message" do
+ assert decode!(@serializer, @client_json_user_broadcast_push, opcode: :binary) == %Phoenix.Socket.Message{
+ join_ref: "12",
+ ref: "123",
+ topic: "topic",
+ event: "broadcast",
+ payload: {"user_event", :json, "{\"a\":\"b\"}", %{}}
+ }
+ end
+ end
+end
diff --git a/test/realtime_web/tenant_broadcaster_test.exs b/test/realtime_web/tenant_broadcaster_test.exs
index d9afbf641..163a1236b 100644
--- a/test/realtime_web/tenant_broadcaster_test.exs
+++ b/test/realtime_web/tenant_broadcaster_test.exs
@@ -1,5 +1,5 @@
defmodule RealtimeWeb.TenantBroadcasterTest do
- # Usage of Clustered
+ # Usage of Clustered and changing Application env
use Realtime.DataCase, async: false
alias Phoenix.Socket.Broadcast
@@ -33,6 +33,7 @@ defmodule RealtimeWeb.TenantBroadcasterTest do
end
setup context do
+ tenant_id = random_string()
Endpoint.subscribe(@topic)
:erpc.call(context.node, Subscriber, :subscribe, [self(), @topic])
@@ -44,100 +45,208 @@ defmodule RealtimeWeb.TenantBroadcasterTest do
__MODULE__,
[:realtime, :tenants, :payload, :size],
&__MODULE__.handle_telemetry/4,
- pid: self()
+ %{pid: self(), tenant: tenant_id}
)
- :ok
+ original = Application.fetch_env!(:realtime, :pubsub_adapter)
+ on_exit(fn -> Application.put_env(:realtime, :pubsub_adapter, original) end)
+ Application.put_env(:realtime, :pubsub_adapter, context.pubsub_adapter)
+
+ {:ok, tenant_id: tenant_id}
end
- describe "pubsub_broadcast/4" do
- test "pubsub_broadcast", %{node: node} do
- message = %Broadcast{topic: @topic, event: "an event", payload: %{"a" => "b"}}
- TenantBroadcaster.pubsub_broadcast("realtime-dev", @topic, message, Phoenix.PubSub)
+ for pubsub_adapter <- [:gen_rpc, :pg2] do
+ describe "pubsub_broadcast/5 #{pubsub_adapter}" do
+ @describetag pubsub_adapter: pubsub_adapter
- assert_receive ^message
+ test "pubsub_broadcast", %{node: node, tenant_id: tenant_id} do
+ message = %Broadcast{topic: @topic, event: "an event", payload: %{"a" => "b"}}
+ TenantBroadcaster.pubsub_broadcast(tenant_id, @topic, message, Phoenix.PubSub, :broadcast)
- # Remote node received the broadcast
- assert_receive {:relay, ^node, ^message}
+ assert_receive ^message
- assert_receive {
- :telemetry,
- [:realtime, :tenants, :payload, :size],
- %{size: 114},
- %{tenant: "realtime-dev"}
- }
- end
+ # Remote node received the broadcast
+ assert_receive {:relay, ^node, ^message}
+
+ assert_receive {
+ :telemetry,
+ [:realtime, :tenants, :payload, :size],
+ %{size: 114},
+ %{tenant: ^tenant_id, message_type: :broadcast}
+ }
+ end
+
+ test "pubsub_broadcast list payload", %{node: node, tenant_id: tenant_id} do
+ message = %Broadcast{topic: @topic, event: "an event", payload: ["a", %{"b" => "c"}, 1, 23]}
+ TenantBroadcaster.pubsub_broadcast(tenant_id, @topic, message, Phoenix.PubSub, :broadcast)
+
+ assert_receive ^message
- test "pubsub_broadcast list payload", %{node: node} do
- message = %Broadcast{topic: @topic, event: "an event", payload: ["a", %{"b" => "c"}, 1, 23]}
- TenantBroadcaster.pubsub_broadcast("realtime-dev", @topic, message, Phoenix.PubSub)
+ # Remote node received the broadcast
+ assert_receive {:relay, ^node, ^message}
- assert_receive ^message
+ assert_receive {
+ :telemetry,
+ [:realtime, :tenants, :payload, :size],
+ %{size: 130},
+ %{tenant: ^tenant_id, message_type: :broadcast}
+ }
+ end
- # Remote node received the broadcast
- assert_receive {:relay, ^node, ^message}
+ test "pubsub_broadcast string payload", %{node: node, tenant_id: tenant_id} do
+ message = %Broadcast{topic: @topic, event: "an event", payload: "some text payload"}
+ TenantBroadcaster.pubsub_broadcast(tenant_id, @topic, message, Phoenix.PubSub, :broadcast)
- assert_receive {
- :telemetry,
- [:realtime, :tenants, :payload, :size],
- %{size: 130},
- %{tenant: "realtime-dev"}
- }
+ assert_receive ^message
+
+ # Remote node received the broadcast
+ assert_receive {:relay, ^node, ^message}
+
+ assert_receive {
+ :telemetry,
+ [:realtime, :tenants, :payload, :size],
+ %{size: 119},
+ %{tenant: ^tenant_id, message_type: :broadcast}
+ }
+ end
end
- test "pubsub_broadcast string payload", %{node: node} do
- message = %Broadcast{topic: @topic, event: "an event", payload: "some text payload"}
- TenantBroadcaster.pubsub_broadcast("realtime-dev", @topic, message, Phoenix.PubSub)
+ describe "pubsub_broadcast_from/6 #{pubsub_adapter}" do
+ @describetag pubsub_adapter: pubsub_adapter
+
+ test "pubsub_broadcast_from", %{node: node, tenant_id: tenant_id} do
+ parent = self()
+
+ spawn_link(fn ->
+ Endpoint.subscribe(@topic)
+ send(parent, :ready)
- assert_receive ^message
+ receive do
+ msg -> send(parent, {:other_process, msg})
+ end
+ end)
- # Remote node received the broadcast
- assert_receive {:relay, ^node, ^message}
+ assert_receive :ready
- assert_receive {
- :telemetry,
- [:realtime, :tenants, :payload, :size],
- %{size: 119},
- %{tenant: "realtime-dev"}
- }
+ message = %Broadcast{topic: @topic, event: "an event", payload: %{"a" => "b"}}
+
+ TenantBroadcaster.pubsub_broadcast_from(tenant_id, self(), @topic, message, Phoenix.PubSub, :broadcast)
+
+ assert_receive {:other_process, ^message}
+
+ # Remote node received the broadcast
+ assert_receive {:relay, ^node, ^message}
+
+ assert_receive {
+ :telemetry,
+ [:realtime, :tenants, :payload, :size],
+ %{size: 114},
+ %{tenant: ^tenant_id, message_type: :broadcast}
+ }
+
+ # This process does not receive the message
+ refute_receive _any
+ end
end
- end
- describe "pubsub_broadcast_from/5" do
- test "pubsub_broadcast_from", %{node: node} do
- parent = self()
+ describe "pubsub_direct_broadcast/6 #{pubsub_adapter}" do
+ @describetag pubsub_adapter: pubsub_adapter
+
+ test "pubsub_direct_broadcast", %{node: node, tenant_id: tenant_id} do
+ message = %Broadcast{topic: @topic, event: "an event", payload: %{"a" => "b"}}
+
+ TenantBroadcaster.pubsub_direct_broadcast(node(), tenant_id, @topic, message, Phoenix.PubSub, :broadcast)
+ TenantBroadcaster.pubsub_direct_broadcast(node, tenant_id, @topic, message, Phoenix.PubSub, :broadcast)
- spawn_link(fn ->
- Endpoint.subscribe(@topic)
- send(parent, :ready)
+ assert_receive ^message
- receive do
- msg -> send(parent, {:other_process, msg})
- end
- end)
+ # Remote node received the broadcast
+ assert_receive {:relay, ^node, ^message}
- assert_receive :ready
+ assert_receive {
+ :telemetry,
+ [:realtime, :tenants, :payload, :size],
+ %{size: 114},
+ %{tenant: ^tenant_id, message_type: :broadcast}
+ }
+ end
- message = %Broadcast{topic: @topic, event: "an event", payload: %{"a" => "b"}}
+ test "pubsub_direct_broadcast list payload", %{node: node, tenant_id: tenant_id} do
+ message = %Broadcast{topic: @topic, event: "an event", payload: ["a", %{"b" => "c"}, 1, 23]}
- TenantBroadcaster.pubsub_broadcast_from("realtime-dev", self(), @topic, message, Phoenix.PubSub)
+ TenantBroadcaster.pubsub_direct_broadcast(node(), tenant_id, @topic, message, Phoenix.PubSub, :broadcast)
+ TenantBroadcaster.pubsub_direct_broadcast(node, tenant_id, @topic, message, Phoenix.PubSub, :broadcast)
- assert_receive {:other_process, ^message}
+ assert_receive ^message
- # Remote node received the broadcast
- assert_receive {:relay, ^node, ^message}
+ # Remote node received the broadcast
+ assert_receive {:relay, ^node, ^message}
- assert_receive {
- :telemetry,
- [:realtime, :tenants, :payload, :size],
- %{size: 114},
- %{tenant: "realtime-dev"}
- }
+ assert_receive {
+ :telemetry,
+ [:realtime, :tenants, :payload, :size],
+ %{size: 130},
+ %{tenant: ^tenant_id, message_type: :broadcast}
+ }
+ end
- # This process does not receive the message
- refute_receive _any
+ test "pubsub_direct_broadcast string payload", %{node: node, tenant_id: tenant_id} do
+ message = %Broadcast{topic: @topic, event: "an event", payload: "some text payload"}
+
+ TenantBroadcaster.pubsub_direct_broadcast(node(), tenant_id, @topic, message, Phoenix.PubSub, :broadcast)
+ TenantBroadcaster.pubsub_direct_broadcast(node, tenant_id, @topic, message, Phoenix.PubSub, :broadcast)
+
+ assert_receive ^message
+
+ # Remote node received the broadcast
+ assert_receive {:relay, ^node, ^message}
+
+ assert_receive {
+ :telemetry,
+ [:realtime, :tenants, :payload, :size],
+ %{size: 119},
+ %{tenant: ^tenant_id, message_type: :broadcast}
+ }
+ end
end
end
- def handle_telemetry(event, measures, metadata, pid: pid), do: send(pid, {:telemetry, event, measures, metadata})
+ describe "collect_payload_size/3" do
+ @describetag pubsub_adapter: :gen_rpc
+
+ test "emit telemetry for struct", %{tenant_id: tenant_id} do
+ TenantBroadcaster.collect_payload_size(
+ tenant_id,
+ %Phoenix.Socket.Broadcast{event: "broadcast", payload: %{"a" => "b"}},
+ :broadcast
+ )
+
+ assert_receive {:telemetry, [:realtime, :tenants, :payload, :size], %{size: 65},
+ %{tenant: ^tenant_id, message_type: :broadcast}}
+ end
+
+ test "emit telemetry for map", %{tenant_id: tenant_id} do
+ TenantBroadcaster.collect_payload_size(
+ tenant_id,
+ %{event: "broadcast", payload: %{"a" => "b"}},
+ :postgres_changes
+ )
+
+ assert_receive {:telemetry, [:realtime, :tenants, :payload, :size], %{size: 53},
+ %{tenant: ^tenant_id, message_type: :postgres_changes}}
+ end
+
+ test "emit telemetry for non-map", %{tenant_id: tenant_id} do
+ TenantBroadcaster.collect_payload_size(tenant_id, "some blob", :presence)
+
+ assert_receive {:telemetry, [:realtime, :tenants, :payload, :size], %{size: 15},
+ %{tenant: ^tenant_id, message_type: :presence}}
+ end
+ end
+
+ def handle_telemetry(event, measures, metadata, %{pid: pid, tenant: tenant}) do
+ if metadata[:tenant] == tenant do
+ send(pid, {:telemetry, event, measures, metadata})
+ end
+ end
end
diff --git a/test/support/clustered.ex b/test/support/clustered.ex
index c7028b79b..f0caa6df0 100644
--- a/test/support/clustered.ex
+++ b/test/support/clustered.ex
@@ -39,6 +39,7 @@ defmodule Clustered do
def start_disconnected(aux_mod \\ nil, opts \\ []) do
extra_config = Keyword.get(opts, :extra_config, [])
phoenix_port = Keyword.get(opts, :phoenix_port, 4012)
+ name = Keyword.get(opts, :name, :peer.random_name())
:ok =
case :net_kernel.start([:"main@127.0.0.1"]) do
@@ -53,7 +54,6 @@ defmodule Clustered do
end
true = :erlang.set_cookie(:cookie)
- name = :peer.random_name()
{:ok, pid, node} =
ExUnit.Callbacks.start_supervised(%{
diff --git a/test/support/containers.ex b/test/support/containers.ex
index cd66f2699..51f923472 100644
--- a/test/support/containers.ex
+++ b/test/support/containers.ex
@@ -3,7 +3,6 @@ defmodule Containers do
alias Realtime.Tenants.Connect
alias Containers.Container
alias Realtime.Database
- alias Realtime.RateCounter
alias Realtime.Tenants.Migrations
use GenServer
@@ -37,7 +36,13 @@ defmodule Containers do
def handle_continue({:pool, max_cases}, state) do
{:ok, _pid} =
:poolboy.start_link(
- [name: {:local, Containers.Pool}, size: max_cases + 2, max_overflow: 0, worker_module: Containers.Container],
+ [
+ strategy: :fifo,
+ name: {:local, Containers.Pool},
+ size: max_cases + 2,
+ max_overflow: 0,
+ worker_module: Containers.Container
+ ],
[]
)
@@ -110,11 +115,25 @@ defmodule Containers do
end
end
+ defp storage_up!(tenant) do
+ settings =
+ Database.from_tenant(tenant, "realtime_test", :stop)
+ |> Map.from_struct()
+ |> Keyword.new()
+
+ case Ecto.Adapters.Postgres.storage_up(settings) do
+ :ok -> :ok
+ {:error, :already_up} -> :ok
+ _ -> raise "Failed to create database"
+ end
+ end
+
# Might be worth changing this to {:ok, tenant}
def checkout_tenant(opts \\ []) do
with container when is_pid(container) <- :poolboy.checkout(Containers.Pool, true, 5_000),
port <- Container.port(container) do
tenant = Generators.tenant_fixture(%{port: port, migrations_ran: 0})
+
run_migrations? = Keyword.get(opts, :run_migrations, false)
settings = Database.from_tenant(tenant, "realtime_test", :stop)
@@ -126,9 +145,9 @@ defmodule Containers do
Postgrex.query!(db_conn, "CREATE SCHEMA IF NOT EXISTS realtime", [])
end)
- Process.exit(conn, :normal)
+ storage_up!(tenant)
- RateCounter.stop(tenant.external_id)
+ RateCounterHelper.stop(tenant.external_id)
# Automatically checkin the container at the end of the test
ExUnit.Callbacks.on_exit(fn ->
@@ -149,6 +168,60 @@ defmodule Containers do
:poolboy.checkin(Containers.Pool, container)
end)
+ publication = "supabase_realtime_test"
+
+ Postgrex.transaction(conn, fn db_conn ->
+ queries = [
+ "DROP TABLE IF EXISTS public.test",
+ "DROP PUBLICATION IF EXISTS #{publication}",
+ "create sequence if not exists test_id_seq;",
+ """
+ create table "public"."test" (
+ "id" int4 not null default nextval('test_id_seq'::regclass),
+ "details" text,
+ primary key ("id"));
+ """,
+ "grant all on table public.test to anon;",
+ "grant all on table public.test to postgres;",
+ "grant all on table public.test to authenticated;",
+ "create publication #{publication} for all tables",
+ # Clean up all replication slots
+ """
+ DO $$
+ DECLARE
+ r RECORD;
+ BEGIN
+ FOR r IN
+ SELECT slot_name, active_pid
+ FROM pg_replication_slots
+ WHERE slot_name LIKE 'supabase_realtime%'
+ LOOP
+ IF r.active_pid IS NOT NULL THEN
+ BEGIN
+ -- try to terminate the backend; ignore any error or race
+              PERFORM pg_terminate_backend(r.active_pid);
+ PERFORM pg_sleep(0.5);
+ EXCEPTION WHEN OTHERS THEN
+ NULL;
+ END;
+ END IF;
+
+ BEGIN
+ -- check existence then try to drop; ignore any error or race
+ IF EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = r.slot_name) THEN
+ PERFORM pg_drop_replication_slot(r.slot_name);
+ END IF;
+ EXCEPTION WHEN OTHERS THEN
+ NULL;
+ END;
+ END LOOP;
+ END$$;
+ """
+ ]
+
+ Enum.each(queries, &Postgrex.query!(db_conn, &1, []))
+ end)
+
tenant =
if run_migrations? do
case run_migrations(tenant) do
@@ -156,16 +229,18 @@ defmodule Containers do
# Avoiding to use Tenants.update_migrations_ran/2 because it touches Cachex and it doesn't play well with
# Ecto Sandbox
:ok = Migrations.create_partitions(conn)
- {:ok, tenant} = Realtime.Api.update_tenant(tenant, %{migrations_ran: count})
+ {:ok, tenant} = Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{migrations_ran: count})
tenant
- _ ->
- raise "Faled to run migrations"
+ error ->
+ raise "Failed to run migrations: #{inspect(error)}"
end
else
tenant
end
+ GenServer.stop(conn)
+
tenant
else
_ -> {:error, "failed to checkout a container"}
@@ -267,7 +342,13 @@ defmodule Containers do
@image,
"postgres",
"-c",
- "config_file=/etc/postgresql/postgresql.conf"
+ "config_file=/etc/postgresql/postgresql.conf",
+ "-c",
+ "wal_keep_size=32MB",
+ "-c",
+ "max_wal_size=32MB",
+ "-c",
+ "max_slot_wal_keep_size=32MB"
])
end
end
diff --git a/test/support/generators.ex b/test/support/generators.ex
index 768e3823b..481944772 100644
--- a/test/support/generators.ex
+++ b/test/support/generators.ex
@@ -283,25 +283,28 @@ defmodule Generators do
jwt
end
- @port 4003
- @serializer Phoenix.Socket.V1.JSONSerializer
-
- def get_connection(
- tenant,
- role \\ "anon",
- claims \\ %{},
- params \\ %{vsn: "1.0.0", log_level: :warning}
- ) do
+ # default test port
+ @port 4002
+
+ def get_connection(tenant, serializer \\ Phoenix.Socket.V1.JSONSerializer, opts \\ []) do
+ params = Keyword.get(opts, :params, %{log_level: :warning})
+ claims = Keyword.get(opts, :claims, %{})
+ role = Keyword.get(opts, :role, "anon")
+
    params = Enum.reduce(params, "", fn {k, v}, acc -> "#{acc}&#{k}=#{v}" end)
- uri = "#{uri(tenant)}?#{params}"
+    uri = "#{uri(tenant, serializer)}#{params}"
with {:ok, token} <- token_valid(tenant, role, claims),
- {:ok, socket} <- WebsocketClient.connect(self(), uri, @serializer, [{"x-api-key", token}]) do
+ {:ok, socket} <- WebsocketClient.connect(self(), uri, serializer, [{"x-api-key", token}]) do
{socket, token}
end
end
- def uri(tenant, port \\ @port), do: "ws://#{tenant.external_id}.localhost:#{port}/socket/websocket"
+ def uri(tenant, serializer, port \\ @port),
+ do: "ws://#{tenant.external_id}.localhost:#{port}/socket/websocket?vsn=#{vsn(serializer)}"
+
+ defp vsn(Phoenix.Socket.V1.JSONSerializer), do: "1.0.0"
+ defp vsn(RealtimeWeb.Socket.V2Serializer), do: "2.0.0"
@spec token_valid(Tenant.t(), binary(), map()) :: {:ok, binary()}
def token_valid(tenant, role, claims \\ %{}), do: generate_token(tenant, Map.put(claims, :role, role))
diff --git a/test/support/metrics_helper.ex b/test/support/metrics_helper.ex
new file mode 100644
index 000000000..ca31ad91b
--- /dev/null
+++ b/test/support/metrics_helper.ex
@@ -0,0 +1,53 @@
+defmodule MetricsHelper do
+ @spec search(String.t(), String.t(), map() | keyword() | nil) ::
+          integer() | nil
+ def search(prometheus_metrics, metric_name, expected_tags \\ nil) do
+ # Escape the metric_name to handle any special regex characters
+ escaped_name = Regex.escape(metric_name)
+    regex = ~r/^(?<name>#{escaped_name})\{(?<tags>[^}]+)\}\s+(?<value>\d+(?:\.\d+)?)$/
+
+ prometheus_metrics
+ |> IO.iodata_to_binary()
+ |> String.split("\n", trim: true)
+ |> Enum.find_value(
+ nil,
+ fn item ->
+ case parse(item, regex, expected_tags) do
+ {:ok, value} -> value
+ {:error, _reason} -> false
+ end
+ end
+ )
+ |> case do
+ nil -> nil
+ number -> String.to_integer(number)
+ end
+ end
+
+ defp parse(metric_string, regex, expected_tags) do
+ case Regex.named_captures(regex, metric_string) do
+ %{"name" => _name, "tags" => tags_string, "value" => value} ->
+ tags = parse_tags(tags_string)
+
+ if expected_tags && !matching_tags(tags, expected_tags) do
+ {:error, "Tags do not match expected tags"}
+ else
+ {:ok, value}
+ end
+
+ nil ->
+ {:error, "Invalid metric format or metric name mismatch"}
+ end
+ end
+
+ defp parse_tags(tags_string) do
+    ~r/(?<key>[a-zA-Z_][a-zA-Z0-9_]*)="(?<value>[^"]*)"/
+ |> Regex.scan(tags_string, capture: :all_names)
+ |> Enum.map(fn [key, value] -> {key, value} end)
+ |> Map.new()
+ end
+
+ defp matching_tags(tags, expected_tags) do
+ Enum.all?(expected_tags, fn {k, v} -> Map.get(tags, to_string(k)) == to_string(v) end)
+ end
+end
diff --git a/test/support/rate_counter_helper.ex b/test/support/rate_counter_helper.ex
new file mode 100644
index 000000000..660ec422f
--- /dev/null
+++ b/test/support/rate_counter_helper.ex
@@ -0,0 +1,41 @@
+defmodule RateCounterHelper do
+ alias Realtime.RateCounter
+
+ @spec stop(term()) :: :ok
+ def stop(tenant_id) do
+ keys =
+ Registry.select(Realtime.Registry.Unique, [
+ {{{:"$1", :_, {:_, :_, :"$2"}}, :"$3", :_}, [{:==, :"$1", RateCounter}, {:==, :"$2", tenant_id}], [:"$_"]}
+ ])
+
+ Enum.each(keys, fn {{_, _, key}, {pid, _}} ->
+ if Process.alive?(pid), do: GenServer.stop(pid)
+ Realtime.GenCounter.delete(key)
+ Cachex.del!(RateCounter, key)
+ end)
+
+ :ok
+ end
+
+  @spec tick!(RateCounter.Args.t()) :: {:ok, RateCounter.t()}
+ def tick!(args) do
+ [{pid, _}] = Registry.lookup(Realtime.Registry.Unique, {RateCounter, :rate_counter, args.id})
+ send(pid, :tick)
+ {:ok, :sys.get_state(pid)}
+ end
+
+ def tick_tenant_rate_counters!(tenant_id) do
+ keys =
+ Registry.select(Realtime.Registry.Unique, [
+ {{{:"$1", :_, {:_, :_, :"$2"}}, :"$3", :_}, [{:==, :"$1", RateCounter}, {:==, :"$2", tenant_id}], [:"$_"]}
+ ])
+
+ Enum.each(keys, fn {{_, _, _key}, {pid, _}} ->
+ send(pid, :tick)
+ # do a get_state to wait for the tick to be processed
+ :sys.get_state(pid)
+ end)
+
+ :ok
+ end
+end
diff --git a/test/support/tenant_connection.ex b/test/support/tenant_connection.ex
index ce5956b49..77328bdfc 100644
--- a/test/support/tenant_connection.ex
+++ b/test/support/tenant_connection.ex
@@ -4,17 +4,17 @@ defmodule TenantConnection do
"""
alias Realtime.Api.Message
alias Realtime.Database
- alias Realtime.Repo
+ alias Realtime.Tenants.Repo
alias Realtime.Tenants.Connect
alias RealtimeWeb.Endpoint
def create_message(attrs, conn, opts \\ [mode: :savepoint]) do
- channel = Message.changeset(%Message{}, attrs)
+ message = Message.changeset(%Message{}, attrs)
{:ok, result} =
Database.transaction(conn, fn transaction_conn ->
- with {:ok, %Message{} = channel} <- Repo.insert(transaction_conn, channel, Message, opts) do
- channel
+ with {:ok, %Message{} = message} <- Repo.insert(transaction_conn, message, Message, opts) do
+ message
end
end)
diff --git a/test/support/test_endpoint.ex b/test/support/test_endpoint.ex
deleted file mode 100644
index 67c477153..000000000
--- a/test/support/test_endpoint.ex
+++ /dev/null
@@ -1,26 +0,0 @@
-defmodule TestEndpoint do
- use Phoenix.Endpoint, otp_app: :phoenix
-
- @session_config store: :cookie,
- key: "_hello_key",
- signing_salt: "change_me"
-
- socket("/socket", RealtimeWeb.UserSocket,
- websocket: [
- connect_info: [:peer_data, :uri, :x_headers],
- fullsweep_after: 20,
- max_frame_size: 8_000_000
- ]
- )
-
- plug(Plug.Session, @session_config)
- plug(:fetch_session)
- plug(Plug.CSRFProtection)
- plug(:put_session)
-
- defp put_session(conn, _) do
- conn
- |> put_session(:from_session, "123")
- |> send_resp(200, Plug.CSRFProtection.get_csrf_token())
- end
-end
diff --git a/test/test_helper.exs b/test/test_helper.exs
index 435f00ef8..767212e24 100644
--- a/test/test_helper.exs
+++ b/test/test_helper.exs
@@ -2,7 +2,7 @@ start_time = :os.system_time(:millisecond)
alias Realtime.Api
alias Realtime.Database
-ExUnit.start(exclude: [:failing], max_cases: 3, capture_log: true)
+ExUnit.start(exclude: [:failing], max_cases: 4, capture_log: true)
max_cases = ExUnit.configuration()[:max_cases]
@@ -15,7 +15,7 @@ end
{:ok, _pid} = Containers.start_link(max_cases)
-for tenant <- Api.list_tenants(), do: Api.delete_tenant(tenant)
+for tenant <- Api.list_tenants(), do: Api.delete_tenant_by_external_id(tenant.external_id)
tenant_name = "dev_tenant"
tenant = Containers.initialize(tenant_name)
@@ -46,17 +46,18 @@ end)
Ecto.Adapters.SQL.Sandbox.mode(Realtime.Repo, :manual)
-end_time = :os.system_time(:millisecond)
-IO.puts("[test_helper.exs] Time to start tests: #{end_time - start_time} ms")
-
Mimic.copy(:syn)
+Mimic.copy(Extensions.PostgresCdcRls.Replications)
+Mimic.copy(Extensions.PostgresCdcRls.Subscriptions)
+Mimic.copy(Realtime.Database)
Mimic.copy(Realtime.GenCounter)
+Mimic.copy(Realtime.GenRpc)
Mimic.copy(Realtime.Nodes)
+Mimic.copy(Realtime.Repo.Replica)
Mimic.copy(Realtime.RateCounter)
Mimic.copy(Realtime.Tenants.Authorization)
Mimic.copy(Realtime.Tenants.Cache)
Mimic.copy(Realtime.Tenants.Connect)
-Mimic.copy(Realtime.Database)
Mimic.copy(Realtime.Tenants.Migrations)
Mimic.copy(Realtime.Tenants.Rebalancer)
Mimic.copy(Realtime.Tenants.ReplicationConnection)
@@ -64,3 +65,13 @@ Mimic.copy(RealtimeWeb.ChannelsAuthorization)
Mimic.copy(RealtimeWeb.Endpoint)
Mimic.copy(RealtimeWeb.JwtVerification)
Mimic.copy(RealtimeWeb.TenantBroadcaster)
+
+# Set the node as the name we use on Clustered.start
+# Also update syn metadata to reflect the new name
+:net_kernel.start([:"main@127.0.0.1"])
+region = Application.get_env(:realtime, :region)
+[{pid, _}] = :syn.members(RegionNodes, region)
+:syn.update_member(RegionNodes, region, pid, fn _ -> [node: node()] end)
+
+end_time = :os.system_time(:millisecond)
+IO.puts("[test_helper.exs] Time to start tests: #{end_time - start_time} ms")