diff --git a/config/config.exs b/config/config.exs index 5e5ec874a..fcb2e3443 100644 --- a/config/config.exs +++ b/config/config.exs @@ -6,3 +6,16 @@ config :nostrum, config :logger, :console, metadata: [:shard, :guild, :channel] if File.exists?("config/secret.exs"), do: import_config("secret.exs") + +if Mix.env() == :test do + config :nostrum, + # constrain the size of the message cache in tests + # to make it easier to test eviction + # and to make sure we don't accidentally modify a production table + # when running tests, use a different table name + caches: [ + messages: + {Nostrum.Cache.MessageCache.Noop, + size_limit: 10, eviction_count: 4, table_name: :nostrum_messages_test} + ] +end diff --git a/guides/advanced/pluggable_caching.md b/guides/advanced/pluggable_caching.md index 4b638a0ab..f88d01080 100644 --- a/guides/advanced/pluggable_caching.md +++ b/guides/advanced/pluggable_caching.md @@ -5,6 +5,13 @@ needs, but all of the caches can be exchanged for your own implementations. For this, implement the behaviours exported by the cache modules under `Nostrum.Cache`. +> ### Exception {: .info} +> +> The exception to the above is the `Nostrum.Cache.MessageCache`, which does not +> include an ETS-based implementation, and defaults to a NoOp cache. This is +> an intentional design decision because caching messages consumes a +> lot more memory than other objects, and is often not needed by most users. + Use the `[:nostrum, :caches]` configuration for configuring which cache implementation you want to use. This can only be set at dependency compilation time. A common situation is that you don't want to cache presences in your bot, @@ -56,7 +63,7 @@ switch this can be added. Mnesia-based caching assumes the user is familar with usage and maintenance of Mnesia: the [Mnesia User's -Guide](https://www.erlang.org/doc/apps/mnesia/users_guide.html) is a good +Guide](https://www.erlang.org/doc/apps/mnesia/mnesia_chap1.html) is a good starting point. 
@@ -65,20 +72,20 @@ starting point.
 
 The NoOp cache adapters are supplied for the case where you do not want to
 cache specific data from Discord at all.
 
-These cache adapters presently also don't send out any data they receive either:
-this means that for caches using the NoOp cache adapters, you won't receive any
-gateway events.
-
-
 ## Cache invalidation
 
-nostrum does not invalidate cache in any special way: it will maintain it in
+Nostrum does not invalidate most caches in any special way: it will maintain them in
 response to gateway events (for instance by deleting a guild and its members
 upon leaving it), but won't regularly prune caches or associate expiration
 times with entries. For volatile (RAM-based) caches this is perfectly fine,
 however, when implementing your own cache backend that persists to disk in
 some way, you need to take care of this yourself.
 
+The exception to this is the `Nostrum.Cache.MessageCache.Mnesia` module, which has a
+default size limit of 10,000 and will automatically remove the 100 oldest
+messages when this limit is reached, as well as delete all cached messages for a
+channel when the channel is deleted.
+ ## Cache performance diff --git a/lib/nostrum/cache/base.ex b/lib/nostrum/cache/base.ex index 0f5b2bb0d..17b1aeba7 100644 --- a/lib/nostrum/cache/base.ex +++ b/lib/nostrum/cache/base.ex @@ -3,6 +3,27 @@ defmodule Nostrum.Cache.Base do @moduledoc since: "0.8.0" @moduledoc false + @dialyzer {:nowarn_function, get_cache_module: 2, get_cache_options: 1} + + @guild_cache_config Application.compile_env(:nostrum, [:caches, :guilds]) + @channel_guild_mapping_config Application.compile_env(:nostrum, [ + :caches, + :channel_guild_mapping + ]) + @member_cache_config Application.compile_env(:nostrum, [:caches, :members]) + @message_cache_config Application.compile_env(:nostrum, [:caches, :messages]) + @presence_cache_config Application.compile_env(:nostrum, [:caches, :presences]) + @user_cache_config Application.compile_env(:nostrum, [:caches, :users]) + + @cache_map %{ + guilds: @guild_cache_config, + channel_guild_mapping: @channel_guild_mapping_config, + members: @member_cache_config, + messages: @message_cache_config, + presences: @presence_cache_config, + users: @user_cache_config + } + def mnesia_note, do: """ Please note that this module is only compiled if Mnesia is available on @@ -11,4 +32,20 @@ defmodule Nostrum.Cache.Base do To retrieve the table name used by this cache, use `table/0`. 
""" + + def get_cache_module(cache_name, default) do + case @cache_map do + %{^cache_name => nil} -> default + %{^cache_name => {module, _opts}} when is_atom(module) -> module + %{^cache_name => module} when is_atom(module) -> module + end + end + + def get_cache_options(cache_name) do + case @cache_map do + %{^cache_name => nil} -> [] + %{^cache_name => {_, opts}} -> opts + %{^cache_name => _} -> [] + end + end end diff --git a/lib/nostrum/cache/cache_supervisor.ex b/lib/nostrum/cache/cache_supervisor.ex index 841f56e4f..04cdbb771 100644 --- a/lib/nostrum/cache/cache_supervisor.ex +++ b/lib/nostrum/cache/cache_supervisor.ex @@ -24,6 +24,7 @@ defmodule Nostrum.Cache.CacheSupervisor do Nostrum.Cache.ChannelGuildMapping, Nostrum.Cache.GuildCache, Nostrum.Cache.MemberCache, + Nostrum.Cache.MessageCache, Nostrum.Cache.UserCache, Nostrum.Cache.PresenceCache ] diff --git a/lib/nostrum/cache/channel_guild_mapping.ex b/lib/nostrum/cache/channel_guild_mapping.ex index 4f48da273..7ed6405eb 100644 --- a/lib/nostrum/cache/channel_guild_mapping.ex +++ b/lib/nostrum/cache/channel_guild_mapping.ex @@ -27,9 +27,8 @@ defmodule Nostrum.Cache.ChannelGuildMapping do """ @moduledoc since: "0.8.0" - @configured_cache :nostrum - |> Application.compile_env( - [:caches, :channel_guild_mapping], + @configured_cache Nostrum.Cache.Base.get_cache_module( + :channel_guild_mapping, @default_implementation ) diff --git a/lib/nostrum/cache/guild_cache.ex b/lib/nostrum/cache/guild_cache.ex index 35072aa66..5e5750da9 100644 --- a/lib/nostrum/cache/guild_cache.ex +++ b/lib/nostrum/cache/guild_cache.ex @@ -27,7 +27,7 @@ defmodule Nostrum.Cache.GuildCache do - the `c:child_spec/1` callback for starting the cache under a supervisor. You need to implement all of them for nostrum to work with your custom - cache. + cache. 
The "upstream data" wording in this module references the fact that the data that the guild cache (and other caches) retrieves represents the raw @@ -45,8 +45,7 @@ defmodule Nostrum.Cache.GuildCache do alias Nostrum.Struct.Sticker alias Nostrum.Util - @configured_cache :nostrum - |> Application.compile_env([:caches, :guilds], @default_cache_implementation) + @configured_cache Nostrum.Cache.Base.get_cache_module(:guilds, @default_cache_implementation) ## Supervisor callbacks # These set up the backing cache. diff --git a/lib/nostrum/cache/member_cache.ex b/lib/nostrum/cache/member_cache.ex index 7ff8e044a..5b7b9158c 100644 --- a/lib/nostrum/cache/member_cache.ex +++ b/lib/nostrum/cache/member_cache.ex @@ -22,8 +22,7 @@ defmodule Nostrum.Cache.MemberCache do alias Nostrum.Struct.Guild.Member alias Nostrum.Struct.User - @configured_cache :nostrum - |> Application.compile_env([:caches, :members], @default_cache_implementation) + @configured_cache Nostrum.Cache.Base.get_cache_module(:members, @default_cache_implementation) @doc """ Add the member for the given guild from upstream data. diff --git a/lib/nostrum/cache/message_cache.ex b/lib/nostrum/cache/message_cache.ex new file mode 100644 index 000000000..0e6a17603 --- /dev/null +++ b/lib/nostrum/cache/message_cache.ex @@ -0,0 +1,292 @@ +defmodule Nostrum.Cache.MessageCache do + @default_cache_implementation Nostrum.Cache.MessageCache.Noop + + @moduledoc """ + Cache behaviour & dispatcher for Discord messages. + + By default, #{@default_cache_implementation} will be used for caching + messages. You can override this in the `:caches` option of the `nostrum` + application by setting the `:messages` field to a different module, or + to the tuple `{module, config}` where `module` is the module to use and + `config` is any compile-time configuration to pass to the module. + + Unlike the other caches, the default is a no-op cache, as messages take + up a lot of memory and most bots do not need messages to be cached. 
+ If you would like to cache messages, you can change the cache implementation + to one of the provided modules under `Nostrum.Cache.MessageCache` + or write your own. + + ## Writing your own message cache + + As with the other caches, the message cache API consists of two parts: + + - The functions that nostrum calls, such as `c:create/1` or `c:update/1`. + These **do not create any objects in the Discord API**, they are purely + created to update the cached data from data that Discord sends us. If you + want to create objects on Discord, use the functions exposed by `Nostrum.Api` instead. + + - the `c:child_spec/1` callback for starting the cache under a supervisor. + + You need to implement both of them for nostrum to work with your custom + cache. + """ + + @configured_cache Nostrum.Cache.Base.get_cache_module(:messages, @default_cache_implementation) + + alias Nostrum.Snowflake + alias Nostrum.Struct.{Channel, Message, User} + + # callbacks + + @doc """ + Retrieve a single `Nostrum.Struct.Message` from the cache by its ID. + """ + @callback get(Message.id()) :: {:ok, Message.t()} | {:error, :not_found} + + @doc """ + Creates a message in the cache. + + The argument given is the raw message payload from Discord's gateway. + """ + @callback create(map()) :: Message.t() + + @doc """ + Updates a message in the cache. + + The argument given is the raw message payload from Discord's gateway, + and the return value is a tuple of the updated message and the old message if + it was found in the cache, otherwise `nil`. + """ + @callback update(map()) :: {old_message :: Message.t() | nil, updated_message :: Message.t()} + + @doc """ + Deletes a message from the cache. + + Expects the deleted message to be returned if it was found. + """ + @callback delete(Channel.id(), Message.id()) :: Message.t() | nil + + @doc """ + Deletes multiple messages from the cache, any message IDs given + will always be for the same channel. + + Returns a list of the deleted messages. 
+ Note that if a message was not found in the cache, it will + not be included in the returned list. + """ + @callback bulk_delete(Channel.id(), [Message.id()]) :: [Message.t()] + + @doc """ + Called when a channel is deleted. + + Any messages in the cache for the channel should be removed. + """ + @callback channel_delete(Channel.id()) :: :ok + + @doc """ + Return a QLC query handle for the cache for read operations. + + This is used by nostrum to provide any read operations on the cache. Write + operations still need to be implemented separately. + + The Erlang manual on [Implementing a QLC + Table](https://www.erlang.org/doc/man/qlc.html#implementing_a_qlc_table) + contains examples for implementation. To prevent full table scans, accept + match specifications in your `TraverseFun` and implement a `LookupFun` as + documented. + + The query handle must return items in the form `{message_id, message}`, where: + - `message_id` is a `t:Nostrum.Struct.Message.id/0` + - `message` is a `t:Nostrum.Struct.Message.t/0` + + If your cache needs some form of setup or teardown for QLC queries (such as + opening connections), see `c:wrap_qlc/1`. + """ + @callback query_handle() :: :qlc.query_handle() + + @doc """ + Retrieve the child spec for starting the cache under a supervisor. + + This callback is optional, and if not implemented, the cache will not be + started under a supervisor. + """ + @callback child_spec(term()) :: Supervisor.child_spec() + + @doc """ + A function that should wrap any `:qlc` operations. + + If you implement a cache that is backed by a database and want to perform + cleanup and teardown actions such as opening and closing connections, + managing transactions and so on, you want to implement this function. nostrum + will then effectively call `wrap_qlc(fn -> :qlc.e(...) end)`. + + If your cache does not need any wrapping, you can omit this. 
+ """ + @callback wrap_qlc((-> result)) :: result when result: term() + @optional_callbacks wrap_qlc: 1 + + @typedoc """ + Used to constrain the return values of functions that can return + a list of messages from the cache. + """ + @type timestamp_like() :: Snowflake.t() | DateTime.t() + + # User-facing + + @doc """ + Retrieve a message from the cache by channel and message id. + """ + defdelegate get(message_id), to: @configured_cache + + @doc """ + Retrieve a list of messages from the cache with a given channel ID, + after a given date, and before a given date. + + Integers are treated as snowflakes, and the atom `:infinity` when given + as a before date will be treated as the maximum possible date. + """ + @spec get_by_channel(Channel.id(), timestamp_like(), timestamp_like() | :infinity) :: [ + Message.t() + ] + def get_by_channel( + channel_id, + after_timestamp \\ 0, + before_timestamp \\ :infinity, + cache \\ @configured_cache + ) do + after_timestamp = timestamp_like_to_snowflake(after_timestamp) + before_timestamp = timestamp_like_to_snowflake(before_timestamp) + + unsorted_result = + wrap_qlc(cache, fn -> + :nostrum_message_cache_qlc.by_channel( + channel_id, + after_timestamp, + before_timestamp, + cache + ) + |> :qlc.e() + end) + + Enum.sort_by(unsorted_result, & &1.id) + end + + @doc """ + Retrieve a list of messages from the cache with a given author ID, + optionally after a given date, and before a given date. + + Integers are treated as snowflakes, and the atom `:infinity` when given + as a before date will be treated as the maximum possible date. 
+ """ + @spec get_by_author(User.id(), timestamp_like(), timestamp_like() | :infinity) :: [ + Message.t() + ] + def get_by_author( + author_id, + after_timestamp \\ 0, + before_timestamp \\ :infinity, + cache \\ @configured_cache + ) do + after_timestamp = timestamp_like_to_snowflake(after_timestamp) + before_timestamp = timestamp_like_to_snowflake(before_timestamp) + + unsorted_result = + wrap_qlc(cache, fn -> + :nostrum_message_cache_qlc.by_author( + author_id, + after_timestamp, + before_timestamp, + cache + ) + |> :qlc.e() + end) + + Enum.sort_by(unsorted_result, & &1.id) + end + + @doc """ + Retrieve a list of messages from the cache with a given channel ID and author ID, + optionally after a given date, and before a given date. + """ + @spec get_by_channel_and_author( + Channel.id(), + User.id(), + timestamp_like(), + timestamp_like() | :infinity + ) :: [ + Message.t() + ] + def get_by_channel_and_author( + channel_id, + author_id, + after_timestamp \\ 0, + before_timestamp \\ :infinity, + cache \\ @configured_cache + ) do + after_timestamp = timestamp_like_to_snowflake(after_timestamp) + before_timestamp = timestamp_like_to_snowflake(before_timestamp) + + unsorted_result = + wrap_qlc(cache, fn -> + :nostrum_message_cache_qlc.by_channel_and_author( + channel_id, + author_id, + after_timestamp, + before_timestamp, + cache + ) + |> :qlc.e() + end) + + Enum.sort_by(unsorted_result, & &1.id) + end + + @doc """ + Call `c:wrap_qlc/1` on the given cache, if implemented. + + If no cache is given, calls out to the default cache. 
+ """ + @spec wrap_qlc((-> result)) :: result when result: term() + @spec wrap_qlc(module(), (-> result)) :: result when result: term() + def wrap_qlc(cache \\ @configured_cache, fun) do + if function_exported?(cache, :wrap_qlc, 1) do + cache.wrap_qlc(fun) + else + fun.() + end + end + + # Nostrum-facing + + @doc false + defdelegate create(message), to: @configured_cache + @doc false + defdelegate update(message), to: @configured_cache + @doc false + defdelegate delete(channel_id, message_id), to: @configured_cache + @doc false + defdelegate bulk_delete(channel_id, message_ids), to: @configured_cache + @doc false + defdelegate channel_delete(channel_id), to: @configured_cache + + @doc """ + Return the QLC handle of the configured cache. + """ + defdelegate query_handle(), to: @configured_cache + + ## Supervisor callbacks + # These set up the backing cache. + @doc false + defdelegate child_spec(opts), to: @configured_cache + + defp timestamp_like_to_snowflake(:infinity), do: :infinity + defp timestamp_like_to_snowflake(snowflake) when is_integer(snowflake), do: snowflake + + defp timestamp_like_to_snowflake(%DateTime{} = dt) do + case Snowflake.from_datetime(dt) do + {:ok, snowflake} -> snowflake + # The date we got was before Discord's epoch, so we'll just treat it as 0 + :error -> 0 + end + end +end diff --git a/lib/nostrum/cache/message_cache/mnesia.ex b/lib/nostrum/cache/message_cache/mnesia.ex new file mode 100644 index 000000000..92713b572 --- /dev/null +++ b/lib/nostrum/cache/message_cache/mnesia.ex @@ -0,0 +1,289 @@ +if Code.ensure_loaded?(:mnesia) do + defmodule Nostrum.Cache.MessageCache.Mnesia do + @moduledoc """ + An Mnesia-based cache for messages. + + #{Nostrum.Cache.Base.mnesia_note()} + + By default, the cache will store up to `10_000` messages, + and will evict the `100` oldest messages when the limit is reached. 
+ + The reason for the eviction count is that with mnesia it is more efficient to + find X oldest records and delete them all at once than to find the oldest + record and delete it each time a new record is added. + + The Mnesia cache supports the following configuration options: + - `size_limit`: The maximum number of messages to store in the cache. + default: `10_000` + - `eviction_count`: The number of messages to evict when the cache is full. + default: `100` + - `table_name`: The name of the Mnesia table to use for the cache. + default: `:nostrum_messages` + - `compressed`: Whether to use compressed in memory storage for the table. + default: `false` + - `type`: Sets the type of Mnesia table created to cache messages. + Can be either `:set` or `:ordered_set`, by choosing `:ordered_set` the + eviction of the oldest messages will be more efficient, however it means + that the table cannot be changed to only store its contents on disk later. + default: `:ordered_set` + + To change this configuration, you can add the following to your + `config.exs`: + + ```elixir + config :nostrum, + caches: %{ + messages: {Nostrum.Cache.MessageCache.Mnesia, + size_limit: 1000, eviction_count: 50, + table_name: :my_custom_messages_table_name, + compressed: true, type: :set} + } + ``` + + You can also change the table name used by the cache by setting the + `table_name` field in the configuration for the `messages` cache. 
+ """ + + @config Nostrum.Cache.Base.get_cache_options(:messages) + + # allow us to override the table name for testing + # without accidentally overwriting the production table + @table_name @config[:table_name] || :nostrum_messages + @record_name @table_name + + @maximum_size @config[:size_limit] || 10_000 + @eviction_count @config[:eviction_count] || 100 + @compressed_table @config[:compressed] || false + @table_type @config[:type] || :ordered_set + + @behaviour Nostrum.Cache.MessageCache + + alias Nostrum.Cache.MessageCache + alias Nostrum.Snowflake + alias Nostrum.Struct.Channel + alias Nostrum.Struct.Message + alias Nostrum.Util + use Supervisor + + @doc "Start the supervisor." + def start_link(init_arg) do + Supervisor.start_link(__MODULE__, init_arg, name: __MODULE__) + end + + @impl Supervisor + @doc "Set up the cache's Mnesia table." + def init(_init_arg) do + options = table_create_attributes() + + case :mnesia.create_table(@table_name, options) do + {:atomic, :ok} -> :ok + {:aborted, {:already_exists, _tab}} -> :ok + end + + Supervisor.init([], strategy: :one_for_one) + end + + @doc "Retrieve the Mnesia table name used for the cache." + @spec table :: atom() + def table, do: @table_name + + @doc "Drop the table used for caching." + @spec teardown() :: {:atomic, :ok} | {:aborted, term()} + def teardown, do: :mnesia.delete_table(@table_name) + + @doc "Clear any objects in the cache." + @spec clear() :: :ok + def clear do + {:atomic, :ok} = :mnesia.clear_table(@table_name) + :ok + end + + # Used by dispatch + + @impl MessageCache + @doc "Retrieve a single message from the cache by id." 
+ @spec get(Message.id()) :: {:ok, Message.t()} | {:error, :not_found} + def get(message_id) do + :mnesia.activity(:sync_transaction, fn -> + case :mnesia.read(@table_name, message_id, :read) do + [{_tag, _message_id, _channel_id, _author_id, message}] -> + {:ok, message} + + _ -> + {:error, :not_found} + end + end) + end + + @impl MessageCache + @doc "Adds a message to the cache." + @spec create(map()) :: Message.t() + def create(payload) do + message = Message.to_struct(payload) + + record = + {@record_name, message.id, message.channel_id, message.author.id, message} + + writer = fn -> + maybe_evict_records() + :mnesia.write(record) + end + + {:atomic, :ok} = :mnesia.sync_transaction(writer) + message + end + + @impl MessageCache + @doc "Updates a message in the cache." + @spec update(map()) :: {old_message :: Message.t() | nil, updated_message :: Message.t()} + def update(payload) do + atomized_payload = + payload + |> Map.new(fn {k, v} -> {Util.maybe_to_atom(k), v} end) + + %{id: id} = atomized_payload + id = Snowflake.cast!(id) + + :mnesia.activity(:sync_transaction, fn -> + case :mnesia.read(@table_name, id, :write) do + [] -> + # we don't have the old message, so we shouldn't + # save it in the cache as updates are not guaranteed + # to have the full message payload + updated_message = Message.to_struct(atomized_payload) + {nil, updated_message} + + [{_tag, _message_id, _channel_id, _author_id, old_message} = entry] -> + updated_message = Message.to_struct(atomized_payload, old_message) + + :mnesia.write(put_elem(entry, 4, updated_message)) + {old_message, updated_message} + end + end) + end + + @impl MessageCache + @doc "Removes a message from the cache." 
+ @spec delete(Channel.id(), Message.id()) :: Message.t() | nil + def delete(channel_id, message_id) do + :mnesia.activity(:sync_transaction, fn -> + case :mnesia.read(@table_name, message_id, :write) do + # as a safety measure, we check the channel_id + # before deleting the message from the cache + # to prevent deleting messages from the wrong channel + [{_tag, _id, ^channel_id, _author_id, message}] -> + :mnesia.delete(@table_name, message_id, :write) + message + + _ -> + nil + end + end) + end + + @impl MessageCache + @doc """ + Removes and returns a list of messages from the cache. + Messages not found in the cache will not be included in the returned list. + """ + @spec bulk_delete(Channel.id(), [Message.id()]) :: [Message.t()] + def bulk_delete(channel_id, message_ids) do + Enum.reduce(message_ids, [], fn message_id, list -> + case delete(channel_id, message_id) do + nil -> + list + + message -> + [message | list] + end + end) + |> Enum.reverse() + end + + @impl MessageCache + @doc "Removes all messages for a channel which was deleted." + @spec channel_delete(Channel.id()) :: :ok + def channel_delete(channel_id) do + :mnesia.activity(:sync_transaction, fn -> + handle = :nostrum_message_cache_qlc.all_message_ids_in_channel(channel_id, __MODULE__) + + :qlc.fold( + fn message_id, _ -> + :mnesia.delete(@table_name, message_id, :write) + end, + nil, + handle + ) + end) + + :ok + end + + @impl MessageCache + @doc "Return a QLC query handle for the cache for read operations." 
+ @spec query_handle() :: :qlc.query_handle() + def query_handle do + :mnesia.table(@table_name) + end + + @impl MessageCache + @doc "Wrap QLC operations in a transaction" + def wrap_qlc(fun) do + :mnesia.activity(:sync_transaction, fun) + end + + @doc false + def table_create_attributes do + ets_props = + if @compressed_table do + [:compressed] + else + [] + end + + [ + attributes: [:message_id, :channel_id, :author_id, :data], + index: [:channel_id, :author_id], + record_name: @record_name, + storage_properties: [ets: ets_props], + type: @table_type + ] + end + + # assumes its called from within a transaction + defp maybe_evict_records do + size = :mnesia.table_info(@table_name, :size) + + if size >= @maximum_size do + case :mnesia.table_info(@table_name, :type) do + :set -> + evict_set_records() + + :ordered_set -> + evict_ordered_set_records() + end + end + end + + defp evict_set_records do + oldest_message_ids = + :nostrum_message_cache_qlc.sorted_by_age_with_limit(__MODULE__, @eviction_count) + + Enum.each(oldest_message_ids, fn message_id -> + :mnesia.delete(@table_name, message_id, :write) + end) + end + + defp evict_ordered_set_records do + first = :mnesia.first(@table_name) + + Enum.reduce(1..(@eviction_count - 1), [first], fn _i, [key | _rest] = list -> + next_key = :mnesia.next(@table_name, key) + [next_key | list] + end) + |> Enum.each(fn key -> + :mnesia.delete(@table_name, key, :write) + end) + end + end +end diff --git a/lib/nostrum/cache/message_cache/noop.ex b/lib/nostrum/cache/message_cache/noop.ex new file mode 100644 index 000000000..bbc1ed3e6 --- /dev/null +++ b/lib/nostrum/cache/message_cache/noop.ex @@ -0,0 +1,47 @@ +defmodule Nostrum.Cache.MessageCache.Noop do + @moduledoc """ + A no-op message cache. + + This cache does not store any messages and always returns `{:error, :not_found}` + for any operation. 
+ """ + + @behaviour Nostrum.Cache.MessageCache + + alias Nostrum.Cache.MessageCache + alias Nostrum.Struct.Message + alias Nostrum.Util + + use Supervisor + + @doc "Start the supervisor." + def start_link(init_arg) do + Supervisor.start_link(__MODULE__, init_arg, name: __MODULE__) + end + + @impl Supervisor + def init(_init_arg) do + Supervisor.init([], strategy: :one_for_one) + end + + @impl MessageCache + def get(_message_id), do: {:error, :not_found} + + @impl MessageCache + def create(payload), do: Util.cast(payload, {:struct, Message}) + + @impl MessageCache + def update(payload), do: {nil, Util.cast(payload, {:struct, Message})} + + @impl MessageCache + def delete(_channel_id, _message_id), do: nil + + @impl MessageCache + def bulk_delete(_channel_id, _message_ids), do: [] + + @impl MessageCache + def channel_delete(_channel_id), do: :ok + + @impl Nostrum.Cache.MessageCache + def query_handle, do: :qlc.string_to_handle(~c"[].") +end diff --git a/lib/nostrum/cache/presence_cache.ex b/lib/nostrum/cache/presence_cache.ex index c8c3ec9e3..799d39495 100644 --- a/lib/nostrum/cache/presence_cache.ex +++ b/lib/nostrum/cache/presence_cache.ex @@ -30,11 +30,7 @@ defmodule Nostrum.Cache.PresenceCache do @moduledoc since: "0.5.0" - @configured_cache :nostrum - |> Application.compile_env( - [:caches, :presences], - @default_cache_implementation - ) + @configured_cache Nostrum.Cache.Base.get_cache_module(:presences, @default_cache_implementation) alias Nostrum.Struct.{Guild, User} alias Nostrum.Util diff --git a/lib/nostrum/cache/user_cache.ex b/lib/nostrum/cache/user_cache.ex index eb7f00ffd..af7e07e8b 100644 --- a/lib/nostrum/cache/user_cache.ex +++ b/lib/nostrum/cache/user_cache.ex @@ -18,8 +18,7 @@ defmodule Nostrum.Cache.UserCache do alias Nostrum.Util import Nostrum.Snowflake, only: [is_snowflake: 1] - @configured_cache :nostrum - |> Application.compile_env([:caches, :users], @default_cache_implementation) + @configured_cache 
Nostrum.Cache.Base.get_cache_module(:users, @default_cache_implementation) ## Behaviour specification diff --git a/lib/nostrum/consumer.ex b/lib/nostrum/consumer.ex index 5de0bfb5e..04e211467 100644 --- a/lib/nostrum/consumer.ex +++ b/lib/nostrum/consumer.ex @@ -242,7 +242,9 @@ defmodule Nostrum.Consumer do @type message_delete :: {:MESSAGE_DELETE, MessageDelete.t(), WSState.t()} @type message_delete_bulk :: {:MESSAGE_DELETE_BULK, MessageDeleteBulk.t(), WSState.t()} @type message_update :: - {:MESSAGE_UPDATE, updated_message :: Nostrum.Struct.Message.t(), WSState.t()} + {:MESSAGE_UPDATE, + {old_message :: Nostrum.Struct.Message.t() | nil, + updated_message :: Nostrum.Struct.Message.t()}, WSState.t()} @type message_reaction_add :: {:MESSAGE_REACTION_ADD, MessageReactionAdd.t(), WSState.t()} @type message_reaction_remove :: {:MESSAGE_REACTION_REMOVE, MessageReactionRemove.t(), WSState.t()} diff --git a/lib/nostrum/shard/dispatch.ex b/lib/nostrum/shard/dispatch.ex index 56656f836..abe77ceb0 100644 --- a/lib/nostrum/shard/dispatch.ex +++ b/lib/nostrum/shard/dispatch.ex @@ -5,6 +5,7 @@ defmodule Nostrum.Shard.Dispatch do ChannelGuildMapping, GuildCache, MemberCache, + MessageCache, PresenceCache, UserCache } @@ -42,7 +43,7 @@ defmodule Nostrum.Shard.Dispatch do VoiceState } - alias Nostrum.Struct.{AutoModerationRule, Interaction, Message, ThreadMember, User} + alias Nostrum.Struct.{AutoModerationRule, Interaction, ThreadMember, User} alias Nostrum.Struct.Guild.{AuditLogEntry, Integration, ScheduledEvent, UnavailableGuild} alias Nostrum.Util alias Nostrum.Voice @@ -95,7 +96,13 @@ defmodule Nostrum.Shard.Dispatch do end def handle_event(:CHANNEL_DELETE = event, p, state) do - ChannelGuildMapping.delete(p.id) + channel_id = p.id + + # Starting this as a task to avoid blocking the dispatch process + # since this is a potentially long operation + Task.start(fn -> MessageCache.channel_delete(channel_id) end) + + ChannelGuildMapping.delete(channel_id) {event, 
GuildCache.channel_delete(p.guild_id, p.id), state} end @@ -242,15 +249,23 @@ defmodule Nostrum.Shard.Dispatch do def handle_event(:INVITE_DELETE = event, p, state), do: {event, InviteDelete.to_struct(p), state} - def handle_event(:MESSAGE_CREATE = event, p, state), do: {event, Message.to_struct(p), state} + def handle_event(:MESSAGE_CREATE = event, p, state), + do: {event, MessageCache.create(p), state} - def handle_event(:MESSAGE_DELETE = event, p, state), - do: {event, MessageDelete.to_struct(p), state} + def handle_event(:MESSAGE_DELETE = event, p, state) do + deleted_message = MessageCache.delete(p.channel_id, p.id) + {event, MessageDelete.to_struct(p, deleted_message), state} + end - def handle_event(:MESSAGE_DELETE_BULK = event, p, state), - do: {event, MessageDeleteBulk.to_struct(p), state} + def handle_event(:MESSAGE_DELETE_BULK = event, p, state) do + deleted_messages = MessageCache.bulk_delete(p.channel_id, p.ids) + p = Map.put(p, :deleted_messages, deleted_messages) + {event, MessageDeleteBulk.to_struct(p), state} + end - def handle_event(:MESSAGE_UPDATE = event, p, state), do: {event, Message.to_struct(p), state} + def handle_event(:MESSAGE_UPDATE = event, p, state) do + {event, MessageCache.update(p), state} + end def handle_event(:MESSAGE_REACTION_ADD = event, p, state) do {event, MessageReactionAdd.to_struct(p), state} diff --git a/lib/nostrum/struct/event/message_delete.ex b/lib/nostrum/struct/event/message_delete.ex index b092690db..29db79631 100644 --- a/lib/nostrum/struct/event/message_delete.ex +++ b/lib/nostrum/struct/event/message_delete.ex @@ -9,7 +9,8 @@ defmodule Nostrum.Struct.Event.MessageDelete do defstruct [ :id, :channel_id, - :guild_id + :guild_id, + :deleted_message ] @typedoc "Id of the deleted message" @@ -25,20 +26,28 @@ defmodule Nostrum.Struct.Event.MessageDelete do """ @type guild_id :: Guild.id() | nil + @typedoc """ + The deleted message, if it was found + in the message cache. 
+ """ + @type deleted_message :: Message.t() | nil + @type t :: %__MODULE__{ id: id, channel_id: channel_id, - guild_id: guild_id + guild_id: guild_id, + deleted_message: deleted_message } @doc false - def to_struct(map) do + def to_struct(map, deleted_message \\ nil) do new = map |> Map.new(fn {k, v} -> {Util.maybe_to_atom(k), v} end) |> Map.update(:id, nil, &Util.cast(&1, Snowflake)) |> Map.update(:channel_id, nil, &Util.cast(&1, Snowflake)) |> Map.update(:guild_id, nil, &Util.cast(&1, Snowflake)) + |> Map.put(:deleted_message, deleted_message) struct(__MODULE__, new) end diff --git a/lib/nostrum/struct/event/message_delete_bulk.ex b/lib/nostrum/struct/event/message_delete_bulk.ex index e1b412b6b..4af559cd0 100644 --- a/lib/nostrum/struct/event/message_delete_bulk.ex +++ b/lib/nostrum/struct/event/message_delete_bulk.ex @@ -6,6 +6,7 @@ defmodule Nostrum.Struct.Event.MessageDeleteBulk do alias Nostrum.Struct.{Channel, Guild, Message} defstruct [ + :deleted_messages, :channel_id, :guild_id, :ids @@ -24,7 +25,13 @@ defmodule Nostrum.Struct.Event.MessageDeleteBulk do @typedoc "Ids of the deleted messages" @type ids :: [Message.id(), ...] + @typedoc """ + The deleted messages, if any were not found + in the message cache they will only have the id and channel_id set. + """ + @type deleted_messages :: [Message.t(), ...] @type t :: %__MODULE__{ + deleted_messages: deleted_messages, channel_id: channel_id, guild_id: guild_id, ids: ids diff --git a/lib/nostrum/struct/message.ex b/lib/nostrum/struct/message.ex index c48cb9a16..d208f7da1 100644 --- a/lib/nostrum/struct/message.ex +++ b/lib/nostrum/struct/message.ex @@ -266,6 +266,51 @@ defmodule Nostrum.Struct.Message do struct(__MODULE__, new) end + # differs from the above in that + # for message updates we have the old message + # and want to merge in the unchanged fields. + # Discord's docs say that for updates, we'll + # only get a subset of the fields. 
+ @doc false + @spec to_struct(map :: map(), old_message :: __MODULE__.t() | nil) :: __MODULE__.t() + def to_struct(map, nil), do: to_struct(map) + + def to_struct(map, old_message) do + new = + map + |> Map.new(fn {k, v} -> {Util.maybe_to_atom(k), v} end) + |> Util.map_update_if_present(:activity, &Util.cast(&1, {:struct, Activity})) + |> Util.map_update_if_present(:application_id, &Util.cast(&1, Snowflake)) + |> Util.map_update_if_present(:application, &Util.cast(&1, {:struct, Application})) + |> Util.map_update_if_present(:attachments, &Util.cast(&1, {:list, {:struct, Attachment}})) + |> Util.map_update_if_present(:author, &Util.cast(&1, {:struct, User})) + |> Util.map_update_if_present(:channel_id, &Util.cast(&1, Snowflake)) + |> Util.map_update_if_present(:components, &Util.cast(&1, {:list, {:struct, Component}})) + |> Util.map_update_if_present(:edited_timestamp, &Util.maybe_to_datetime/1) + |> Util.map_update_if_present(:embeds, &Util.cast(&1, {:list, {:struct, Embed}})) + |> Util.map_update_if_present(:guild_id, &Util.cast(&1, Snowflake)) + |> Util.map_update_if_present(:id, &Util.cast(&1, Snowflake)) + |> Util.map_update_if_present(:interaction, &Util.cast(&1, {:struct, Interaction})) + |> Util.map_update_if_present(:member, &Util.cast(&1, {:struct, Member})) + |> Util.map_update_if_present( + :mention_channels, + &Util.cast(&1, {:list, {:struct, Channel}}) + ) + |> Util.map_update_if_present(:mention_roles, &Util.cast(&1, {:list, Snowflake})) + |> Util.map_update_if_present(:mentions, &Util.cast(&1, {:list, {:struct, User}})) + |> Util.map_update_if_present(:message_reference, &Util.cast(&1, {:struct, Reference})) + |> Util.map_update_if_present(:nonce, &Util.cast(&1, Snowflake)) + |> Util.map_update_if_present(:poll, &Util.cast(&1, {:struct, Poll})) + |> Util.map_update_if_present(:reactions, &Util.cast(&1, {:list, {:struct, Reaction}})) + |> Util.map_update_if_present(:referenced_message, &Util.cast(&1, {:struct, __MODULE__})) + |> 
Util.map_update_if_present(:sticker_items, &Util.cast(&1, {:list, {:struct, Sticker}})) + |> Util.map_update_if_present(:thread, &Util.cast(&1, {:struct, Channel})) + |> Util.map_update_if_present(:timestamp, &Util.maybe_to_datetime/1) + |> Util.map_update_if_present(:webhook_id, &Util.cast(&1, Snowflake)) + + struct(old_message, new) + end + @doc """ Takes the message and produces a URL that, when clicked from the user client, will jump them to that message, assuming they have access to the message and the message diff --git a/lib/nostrum/util.ex b/lib/nostrum/util.ex index 435c27494..118f07e1a 100644 --- a/lib/nostrum/util.ex +++ b/lib/nostrum/util.ex @@ -273,6 +273,22 @@ defmodule Nostrum.Util do end end + @doc """ + Updates a map with a new value if the key is present. + Otherwise, returns the map unchanged. + """ + @spec map_update_if_present(map(), term(), (term() -> term())) :: map() + def map_update_if_present(map, key, fun) do + case map do + %{^key => value} -> + new_value = fun.(value) + Map.put(map, key, new_value) + + _ -> + map + end + end + @doc false @spec fullsweep_after() :: {:fullsweep_after, non_neg_integer} def fullsweep_after do diff --git a/src/nostrum_message_cache_qlc.erl b/src/nostrum_message_cache_qlc.erl new file mode 100644 index 000000000..0ec7eb667 --- /dev/null +++ b/src/nostrum_message_cache_qlc.erl @@ -0,0 +1,154 @@ +% Native QLC operations. +% +% Using QLC from Elixir we pay the price of having to recompile our query +% handle every time we run it. For longer scans like `reduce` this is less of a +% problem, but for queries that users expect to be fast, like a `get` from an +% ETS table, more than a millisecond is unacceptable. +% +% Apart from the recompilation price, queries written using QLC's +% `string_to_handle` also have worse performance than queries written in native +% Erlang, see +% https://elixirforum.com/t/performance-discrepancies-with-using-qlc-queries-written-in-erlang-and-elixir/56006. 
+% I assume this is caused by the Erlang parse transform doing smart things at compile time.
+%
+% NOTE: None of the functions in this module make any guarantees about the
+% sorting of the output. If you need a specific order, you must sort the output
+% yourself either by using `qlc:keysort` or by using `lists:sort` on the output.
+
+-module(nostrum_message_cache_qlc).
+-export([
+    by_channel/4,
+    by_channel_and_author/5,
+    by_author/4,
+    sorted_by_age_with_limit/2,
+    all_message_ids_in_channel/2
+]).
+
+-include_lib("stdlib/include/qlc.hrl").
+
+% The matching on the cache names here is smelly. But we need it for the
+% built-in caches for now.
+
+-define(MNESIA_CACHE, 'Elixir.Nostrum.Cache.MessageCache.Mnesia').
+
+% These must be selected carefully so that QLC can plan using the indices properly.
+% NOTE(review): this macro is not referenced in this module; the clauses below
+% inline the tuple shape. Kept as documentation of the Mnesia record layout.
+-define(MNESIA_FORMAT, {_Tag, MessageId, ChannelId, AuthorId, Message}).
+
+% Fetch all messages in a channel with an id of at least After and at most
+% Before (inclusive bounds). Accepts infinity as a Before value since Erlang
+% term ordering makes atoms always larger than integers.
+%
+% Spec fix: `| infinity` belongs on the Before (third) argument -- that is the
+% bound compared with `MessageId =< Before` -- not on After as previously typed.
+-spec by_channel(
+    'Elixir.Nostrum.Struct.Channel':id(),
+    non_neg_integer(),
+    non_neg_integer() | infinity,
+    module()
+) -> qlc:query_handle().
+by_channel(RequestedChannelId, After, Before, ?MNESIA_CACHE) ->
+    qlc:q([
+        Message
+        || {_Tag, MessageId, ChannelId, _, Message} <- ?MNESIA_CACHE:query_handle(),
+           ChannelId =:= RequestedChannelId,
+           MessageId =< Before,
+           MessageId >= After
+    ]);
+by_channel(RequestedChannelId, After, Before, Cache) ->
+    qlc:q([
+        Message
+        || {MessageId, #{channel_id := ChannelId} = Message} <- Cache:query_handle(),
+           ChannelId =:= RequestedChannelId,
+           MessageId =< Before,
+           MessageId >= After
+    ]).
+
+% Lookup the IDs of all cached messages for a given channel.
+-spec all_message_ids_in_channel('Elixir.Nostrum.Struct.Channel':id(), module()) ->
+    qlc:query_handle().
+all_message_ids_in_channel(RequestedChannelId, ?MNESIA_CACHE) ->
+    qlc:q([
+        MessageId
+        || {_Tag, MessageId, ChannelId, _, _Message} <- ?MNESIA_CACHE:query_handle(),
+           ChannelId =:= RequestedChannelId
+    ]);
+all_message_ids_in_channel(RequestedChannelId, Cache) ->
+    qlc:q([
+        MessageId
+        || {MessageId, #{channel_id := ChannelId} = _Message} <- Cache:query_handle(),
+           ChannelId =:= RequestedChannelId
+    ]).
+
+% Lookup all cached messages in a channel by a specific user.
+% The output is not sorted.
+%
+% Spec fix: the second argument is an author (user) id, so it is typed as
+% User:id() (matching by_author/4 below), not Message:id().
+-spec by_channel_and_author(
+    'Elixir.Nostrum.Struct.Channel':id(),
+    'Elixir.Nostrum.Struct.User':id(),
+    non_neg_integer(),
+    non_neg_integer() | infinity,
+    module()
+) -> qlc:query_handle().
+by_channel_and_author(RequestedChannelId, RequestedUserId, After, Before, ?MNESIA_CACHE) ->
+    qlc:q([
+        Message
+        || {_Tag, MessageId, ChannelId, AuthorId, Message} <- ?MNESIA_CACHE:query_handle(),
+           ChannelId =:= RequestedChannelId,
+           AuthorId =:= RequestedUserId,
+           MessageId =< Before,
+           MessageId >= After
+    ]);
+by_channel_and_author(RequestedChannelId, RequestedUserId, After, Before, Cache) ->
+    qlc:q([
+        Message
+        || {MessageId, #{channel_id := ChannelId, author := #{id := AuthorId}} = Message} <-
+               Cache:query_handle(),
+           ChannelId =:= RequestedChannelId,
+           AuthorId =:= RequestedUserId,
+           MessageId =< Before,
+           MessageId >= After
+    ]).
+
+% Lookup all cached messages by a specific user
+% with a message id of at least After and at most Before (inclusive bounds).
+%
+% Spec fix: Before accepts infinity, consistent with the other range queries
+% in this module (callers pass :infinity for an unbounded upper limit).
+-spec by_author(
+    'Elixir.Nostrum.Struct.User':id(),
+    After :: non_neg_integer(),
+    Before :: non_neg_integer() | infinity,
+    module()
+) -> qlc:query_handle().
+by_author(RequestedUserId, After, Before, ?MNESIA_CACHE) ->
+    qlc:q([
+        Message
+        || {_Tag, MessageId, _ChannelId, AuthorId, Message} <- ?MNESIA_CACHE:query_handle(),
+           AuthorId =:= RequestedUserId,
+           MessageId =< Before,
+           MessageId >= After
+    ]);
+by_author(RequestedUserId, After, Before, Cache) ->
+    qlc:q([
+        Message
+        || {MessageId, #{author := #{id := AuthorId}} = Message} <- Cache:query_handle(),
+           AuthorId =:= RequestedUserId,
+           MessageId =< Before,
+           MessageId >= After
+    ]).
+
+% Lookup the id of cached messages sorted by message id.
+% Returns at most Limit ids: the Limit smallest message ids in the cache
+% (the oldest messages, presumably, given time-ordered snowflake ids --
+% see the "by age" name), in descending order.
+-spec sorted_by_age_with_limit(module(), non_neg_integer()) -> list().
+sorted_by_age_with_limit(?MNESIA_CACHE, Limit) ->
+    Q1 = qlc:q([
+        MessageId
+        || {_Tag, MessageId, _ChannelId, _AuthorId, _Message} <- ?MNESIA_CACHE:query_handle()
+    ]),
+    sort_with_limit(Q1, Limit);
+sorted_by_age_with_limit(Cache, Limit) ->
+    Q1 = qlc:q([MessageId || {MessageId, _Message} <- Cache:query_handle()]),
+    sort_with_limit(Q1, Limit).
+
+% Single-pass bounded selection: fold over the query handle, keeping a gb_set
+% of the Limit smallest ids seen so far. The accumulator is
+% {Count, Set, LargestInSet}; Count never exceeds Limit.
+% NOTE(review): gb_sets:insert/2 raises if the element is already present, so
+% this assumes message ids in the table are unique -- TODO confirm this holds
+% for every supported table type.
+sort_with_limit(Q1, Limit) ->
+    Fn = fun(MessageId, {Count1, Set1, Largest1}) ->
+        if
+            % Set is full but this id is smaller than the largest kept:
+            % evict the current largest and keep this id. Count stays at Limit.
+            (MessageId < Largest1) andalso (Count1 >= Limit) ->
+                Set2 = gb_sets:delete(Largest1, Set1),
+                Set3 = gb_sets:insert(MessageId, Set2),
+                Largest2 = gb_sets:largest(Set3),
+                {Count1, Set3, Largest2};
+            % Still room below the limit: keep the id unconditionally.
+            (Count1 < Limit) ->
+                Set2 = gb_sets:insert(MessageId, Set1),
+                Largest2 = gb_sets:largest(Set2),
+                {Count1 + 1, Set2, Largest2};
+            % Set full and id does not qualify: skip it.
+            true ->
+                {Count1, Set1, Largest1}
+        end
+    end,
+    % The initial Largest of 0 is never consulted before the set is full,
+    % because the first branch also requires Count1 >= Limit.
+    {_, Set, _} = qlc:fold(Fn, {0, gb_sets:new(), 0}, Q1),
+    % gb_sets:to_list/1 yields ascending order; reverse to return descending.
+    lists:reverse(gb_sets:to_list(Set)).
diff --git a/test/nostrum/cache/message_cache/mnesia_additional_test.exs b/test/nostrum/cache/message_cache/mnesia_additional_test.exs new file mode 100644 index 000000000..2cf1ea7eb --- /dev/null +++ b/test/nostrum/cache/message_cache/mnesia_additional_test.exs @@ -0,0 +1,343 @@ +defmodule Nostrum.Cache.MessageCache.MnesiaAdditionalTest do + use ExUnit.Case + + alias Nostrum.Cache.MessageCache + alias Nostrum.Struct.Message + + @test_message %{ + id: 1_234_567_000, + channel_id: 7_654_321_000, + author: %{ + id: 12_345_000, + username: "test", + avatar: nil, + bot: true, + mfa_enabled: nil, + verified: nil + }, + content: "Hello, world!", + timestamp: "1970-01-01T00:00:00Z", + edited_timestamp: nil + } + + @test_message_two %{ + id: 7_654_321_000_000_000, + channel_id: 1_234_567_000, + author: %{ + id: 54_321_000, + username: "test two", + avatar: nil, + bot: false, + mfa_enabled: nil, + verified: nil + }, + content: "Goodbye, world!", + timestamp: "2038-01-01T00:00:00Z", + edited_timestamp: nil, + embeds: [ + %{ + title: "Test Embed", + description: "This is a test embed", + url: "https://example.com", + timestamp: "2038-01-01T00:00:00Z", + color: 0x00FF00, + footer: %{ + text: "Test Footer" + }, + fields: [ + %{ + name: "Test Field", + value: "Test Value", + inline: false + } + ] + } + ] + } + + setup do + on_exit(:cleanup, fn -> + try do + MessageCache.Mnesia.teardown() + rescue + e -> e + end + end) + + [pid: start_supervised!(MessageCache.Mnesia)] + end + + describe "create/1" do + test "evicts the messages with the lowest ids when it gets full" do + for id <- 1..11, do: MessageCache.Mnesia.create(Map.put(@test_message, :id, id)) + + # in tests, the cache is limited to 10 messages + # and we evict 4 messages when hitting the limit + # so the first 4 messages should be evicted + + for id <- 1..4 do + assert MessageCache.Mnesia.get(id) == {:error, :not_found} + end + + for id <- 5..11 do + assert {:ok, %Message{id: ^id}} = MessageCache.Mnesia.get(id) + end + 
end + + test "eviction for tables of type set works as well" do + # drop and recreate the table with a different type + MessageCache.Mnesia.teardown() + + table_create_attributes = + MessageCache.Mnesia.table_create_attributes() + |> Keyword.put(:type, :set) + + {:atomic, :ok} = :mnesia.create_table(MessageCache.Mnesia.table(), table_create_attributes) + + for id <- 1..11, do: MessageCache.Mnesia.create(Map.put(@test_message, :id, id)) + + for id <- 1..4 do + assert MessageCache.Mnesia.get(id) == {:error, :not_found} + end + + for id <- 5..11 do + assert {:ok, %Message{id: ^id}} = MessageCache.Mnesia.get(id) + end + end + end + + describe "update/1" do + test "returns {old_message, updated_message} when the old message is found in the cache" do + expected_old_message = MessageCache.Mnesia.create(@test_message_two) + + updated_payload = %{ + id: @test_message_two.id, + content: "Hello, world!", + channel_id: @test_message_two.channel_id + } + + {old_message, updated_message} = MessageCache.Mnesia.update(updated_payload) + + assert old_message == expected_old_message + assert updated_message == %{old_message | content: "Hello, world!"} + end + + test "does not save the updated message to the cache it was not there before" do + updated_payload = %{ + id: 10_258_109_258_109_258_125, + content: "Hello, world!", + channel_id: 10_258_109_258_109_258_125 + } + + {old_message, updated_message} = MessageCache.Mnesia.update(updated_payload) + + assert updated_message == Message.to_struct(updated_payload) + assert old_message == nil + assert MessageCache.Mnesia.get(10_258_109_258_109_258_125) == {:error, :not_found} + end + end + + describe "get/1" do + test "returns {:ok, message} when the message is found in the cache" do + expected = MessageCache.Mnesia.create(@test_message) + assert {:ok, expected} == MessageCache.Mnesia.get(@test_message.id) + end + end + + describe "delete/2" do + test "returns the deleted message when it is found in the cache" do + expected_message = 
MessageCache.Mnesia.create(@test_message) + + assert expected_message == + MessageCache.Mnesia.delete(@test_message.channel_id, @test_message.id) + end + end + + describe "bulk_delete/2" do + test "returns the deleted messages when they are found in the cache" do + expected_messages = [ + MessageCache.Mnesia.create(@test_message), + MessageCache.Mnesia.create(%{@test_message_two | channel_id: @test_message.channel_id}) + ] + + assert expected_messages == + MessageCache.Mnesia.bulk_delete(@test_message.channel_id, [ + @test_message.id, + @test_message_two.id + ]) + end + + test "does not include messages not found in the cache in the returned list" do + expected_message = MessageCache.Mnesia.create(@test_message) + + assert [expected_message] == + MessageCache.Mnesia.bulk_delete(@test_message.channel_id, [ + @test_message.id, + @test_message_two.id + ]) + end + end + + describe "channel_delete/1" do + test "deletes all messages for the channel" do + MessageCache.Mnesia.create(@test_message) + MessageCache.Mnesia.create(%{@test_message_two | channel_id: @test_message.channel_id}) + + assert :ok == MessageCache.Mnesia.channel_delete(@test_message.channel_id) + assert {:error, :not_found} == MessageCache.Mnesia.get(@test_message.id) + assert {:error, :not_found} == MessageCache.Mnesia.get(@test_message_two.id) + end + end + + describe "get_by_channel/4" do + setup do + MessageCache.Mnesia.create(@test_message) + MessageCache.Mnesia.create(%{@test_message_two | channel_id: @test_message.channel_id}) + MessageCache.Mnesia.create(%{@test_message_two | id: 10_000_000_000_000_000}) + + :ok + end + + test "only returns messages with the given channel_id" do + expected_message_one = Message.to_struct(@test_message) + + expected_message_two = + Message.to_struct(%{@test_message_two | channel_id: @test_message.channel_id}) + + assert [expected_message_one, expected_message_two] == + MessageCache.get_by_channel( + @test_message.channel_id, + 0, + :infinity, + MessageCache.Mnesia 
+ ) + end + + test "it allows constraining the messages by timestamp" do + expected_message_one = Message.to_struct(@test_message) + + assert [expected_message_one] == + MessageCache.get_by_channel( + @test_message.channel_id, + 0, + 5_000_000_000_000, + MessageCache.Mnesia + ) + end + + test "it also allows giving a datetime instead of a snowflake" do + expected_message_one = Message.to_struct(@test_message) + + assert [expected_message_one] == + MessageCache.get_by_channel( + @test_message.channel_id, + ~U[2015-01-01 00:00:00.0000Z], + ~U[2015-01-02 00:00:00.0000Z], + MessageCache.Mnesia + ) + end + end + + describe "get_by_author/4" do + setup do + MessageCache.Mnesia.create(@test_message) + MessageCache.Mnesia.create(@test_message_two) + + MessageCache.Mnesia.create(%{ + @test_message_two + | id: 10_000_000_000_000, + channel_id: 500_000_000 + }) + + :ok + end + + test "only returns messages with the given author_id" do + expected_message_one = + Message.to_struct(%{@test_message_two | id: 10_000_000_000_000, channel_id: 500_000_000}) + + expected_message_two = Message.to_struct(@test_message_two) + + assert [expected_message_one, expected_message_two] == + MessageCache.get_by_author( + @test_message_two.author.id, + 0, + :infinity, + MessageCache.Mnesia + ) + end + + test "it allows constraining the messages by timestamp" do + expected_message_one = Message.to_struct(@test_message_two) + + assert [expected_message_one] == + MessageCache.get_by_author( + @test_message_two.author.id, + 5_000_000_000_000_000, + 11_000_000_000_000_000, + MessageCache.Mnesia + ) + end + + test "it also allows giving a datetime instead of a snowflake" do + expected_message_one = Message.to_struct(@test_message_two) + + assert [expected_message_one] == + MessageCache.get_by_author( + @test_message_two.author.id, + ~U[2015-01-20 00:00:00.0000Z], + ~U[2015-01-25 00:00:00.0000Z], + MessageCache.Mnesia + ) + end + end + + describe "get_by_channel_id_and_author/5" do + setup do + 
MessageCache.Mnesia.create(@test_message) + MessageCache.Mnesia.create(%{@test_message_two | channel_id: @test_message.channel_id}) + MessageCache.Mnesia.create(%{@test_message_two | id: 10_000_000_000_000}) + + :ok + end + + test "only returns messages with the given channel_id and author_id" do + expected_message_one = Message.to_struct(@test_message) + + assert [expected_message_one] == + MessageCache.get_by_channel_and_author( + @test_message.channel_id, + @test_message.author.id, + 0, + :infinity, + MessageCache.Mnesia + ) + end + + test "it allows constraining the messages by timestamp" do + expected_message_one = Message.to_struct(@test_message) + + assert [expected_message_one] == + MessageCache.get_by_channel_and_author( + @test_message.channel_id, + @test_message.author.id, + 0, + 5_000_000_000_000, + MessageCache.Mnesia + ) + end + + test "it also allows giving a datetime instead of a snowflake" do + expected_message_one = Message.to_struct(@test_message) + + assert [expected_message_one] == + MessageCache.get_by_channel_and_author( + @test_message.channel_id, + @test_message.author.id, + ~U[2015-01-01 00:00:00.0000Z], + ~U[2015-01-02 00:00:00.0000Z], + MessageCache.Mnesia + ) + end + end +end diff --git a/test/nostrum/cache/message_cache_meta_test.exs b/test/nostrum/cache/message_cache_meta_test.exs new file mode 100644 index 000000000..c575a488f --- /dev/null +++ b/test/nostrum/cache/message_cache_meta_test.exs @@ -0,0 +1,117 @@ +defmodule Nostrum.Cache.MessageCacheMetaTest do + alias Nostrum.Cache.MessageCache + use ExUnit.Case, async: true + + @cache_modules [ + # Dispatcher + MessageCache, + # Implementation + MessageCache.Mnesia, + MessageCache.Noop + ] + + for cache <- @cache_modules do + defmodule :"#{cache}Test" do + alias Nostrum.Struct.Message + use ExUnit.Case + + # this is needed because otherwise we cannot access + # the cache in the tests + @cache cache + + @test_message %{ + id: 1_234_567, + channel_id: 7_654_321, + author: %{ + id: 
12345, + username: "test", + avatar: nil, + bot: true, + mfa_enabled: nil, + verified: nil + }, + content: "Hello, world!", + timestamp: "1970-01-01T00:00:00Z", + edited_timestamp: nil + } + + @test_message_two %{ + id: 7_654_321, + channel_id: 1_234_567, + author: %{ + id: 54321, + username: "test two", + avatar: nil, + bot: false, + mfa_enabled: nil, + verified: nil + }, + content: "Goodbye, world!", + timestamp: "2038-01-01T00:00:00Z", + edited_timestamp: nil, + embeds: [ + %{ + title: "Test Embed", + description: "This is a test embed", + url: "https://example.com", + timestamp: 0, + color: 0x00FF00, + footer: %{ + text: "Test Footer" + }, + fields: [ + %{ + name: "Test Field", + value: "Test Value", + inline: false + } + ] + } + ] + } + + setup do + on_exit(:cleanup, fn -> + try do + if function_exported?(@cache, :teardown, 0) do + apply(@cache, :teardown, []) + end + rescue + e -> e + end + end) + + [pid: start_supervised!(@cache)] + end + + describe "create/1" do + test "returns a struct of the created message" do + expected_one = Message.to_struct(@test_message) + assert expected_one == @cache.create(@test_message) + + expected_two = Message.to_struct(@test_message_two) + assert expected_two == @cache.create(@test_message_two) + end + end + + describe "update/1" do + test "returns {nil, updated_message} on and uncached message" do + expected = Message.to_struct(@test_message) + assert {nil, expected} == @cache.update(@test_message) + end + end + + describe "get/1" do + test "returns {:error, :not_found} on an uncached message" do + assert {:error, :not_found} == @cache.get(10_258_109_258_109_258_125) + end + end + + describe "delete/2" do + test "returns nil on an uncached message" do + assert nil == @cache.delete(10_258_109_258_109_258_125, 10_258_109_258_109_258_125) + end + end + end + end +end diff --git a/test/nostrum/shard/dispatch_test.exs b/test/nostrum/shard/dispatch_test.exs index 0b1ddf509..cb6499dc6 100644 --- 
a/test/nostrum/shard/dispatch_test.exs +++ b/test/nostrum/shard/dispatch_test.exs @@ -25,7 +25,7 @@ defmodule Nostrum.Shard.DispatchTest do {key, event, _} = Dispatch.handle_event(:MESSAGE_DELETE_BULK, payload, %{}) assert(^key = :MESSAGE_DELETE_BULK) - assert(^event = struct(MessageDeleteBulk, payload)) + assert(^event = struct(MessageDeleteBulk, Map.put(payload, :deleted_messages, []))) end end end