defmodule Map do
@moduledoc """
A `Dict` implementation that works on maps.
Maps are key-value stores where keys are compared using
the strict equality operator (`===`). Maps can be created with
the `%{}` special form defined in the `Kernel.SpecialForms`
module.
For more information about the functions in this module and
their APIs, please consult the `Dict` module.
"""
use Dict
@type key :: any
@type value :: any
defdelegate [keys(map), values(map), merge(map1, map2), to_list(map)], to: :maps
@compile {:inline, fetch: 2, put: 3, delete: 2, has_key?: 2}
# TODO: Deprecate by 1.3
# TODO: Remove by 1.4
@doc false
def size(map) do
map_size(map)
end
@doc """
Returns a new empty map.
"""
@spec new :: map
def new, do: %{}
@doc """
Creates a map from an enumerable.
Duplicated keys are removed; the latest one prevails.
## Examples
iex> Map.new([{:b, 1}, {:a, 2}])
%{a: 2, b: 1}
iex> Map.new([a: 1, a: 2, a: 3])
%{a: 3}
"""
@spec new(Enum.t) :: map
def new(enumerable) do
Enum.reduce(enumerable, %{}, fn {k, v}, acc -> put(acc, k, v) end)
end
@doc """
Creates a map from an enumerable via the transformation function.
Duplicated entries are removed; the latest one prevails.
## Examples
iex> Map.new([:a, :b], fn x -> {x, x} end)
%{a: :a, b: :b}
"""
@spec new(Enum.t, (term -> {key, value})) :: map
def new(enumerable, transform) do
fun = fn el, acc ->
{k, v} = transform.(el)
put(acc, k, v)
end
Enum.reduce(enumerable, %{}, fun)
end
def has_key?(map, key), do: :maps.is_key(key, map)
def fetch(map, key), do: :maps.find(key, map)
def put(map, key, val) do
:maps.put(key, val, map)
end
def delete(map, key), do: :maps.remove(key, map)
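@doc """
Merges two maps into one, resolving conflicts through the given callback.
The callback is invoked for each conflicting key with the key, the value
in `map1`, and the value in `map2`; its return value is used in the result.
## Examples
iex> Map.merge(%{a: 1, b: 2}, %{a: 3, d: 4}, fn _k, v1, v2 -> v1 + v2 end)
%{a: 4, b: 2, d: 4}
"""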
def merge(map1, map2, callback) do
:maps.fold fn k, v2, acc ->
update(acc, k, v2, fn(v1) -> callback.(k, v1, v2) end)
end, map1, map2
end
@doc """
Updates the value at `key` in `map` by applying `fun` to the current value.
Raises if `key` is not present in `map`.
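## Examples
iex> Map.update!(%{a: 1}, :a, &(&1 * 2))
%{a: 2}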
"""
def update!(%{} = map, key, fun) do
case fetch(map, key) do
{:ok, value} ->
put(map, key, fun.(value))
:error ->
:erlang.error({:badkey, key})
end
end
def update!(map, _key, _fun), do: :erlang.error({:badmap, map})
@doc """
Gets the value for `key` and updates it in one pass. `fun` receives the
current value (or `nil` if `key` is not present) and must return a
two-element tuple: the value to return and the new value to store under `key`.
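## Examples
iex> Map.get_and_update(%{a: 1}, :a, fn current -> {current, current + 1} end)
{1, %{a: 2}}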
"""
def get_and_update(%{} = map, key, fun) do
current_value = case :maps.find(key, map) do
{:ok, value} -> value
:error -> nil
end
{get, update} = fun.(current_value)
{get, :maps.put(key, update, map)}
end
def get_and_update(map, _key, _fun), do: :erlang.error({:badmap, map})
@doc """
Gets the value for `key` and updates it in one pass, raising if `key` is
not present in the map.
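## Examples
iex> Map.get_and_update!(%{a: 1}, :a, fn current -> {current, current + 1} end)
{1, %{a: 2}}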
"""
def get_and_update!(%{} = map, key, fun) do
case :maps.find(key, map) do
{:ok, value} ->
{get, update} = fun.(value)
{get, :maps.put(key, update, map)}
:error ->
:erlang.error({:badkey, key})
end
end
def get_and_update!(map, _key, _fun), do: :erlang.error({:badmap, map})
@doc """
Converts a struct to map.
It accepts the struct module or a struct itself and
simply removes the `__struct__` field from the struct.
## Example
defmodule User do
defstruct [:name]
end
Map.from_struct(User)
#=> %{name: nil}
Map.from_struct(%User{name: "john"})
#=> %{name: "john"}
"""
def from_struct(struct) when is_atom(struct) do
:maps.remove(:__struct__, struct.__struct__)
end
def from_struct(%{__struct__: _} = struct) do
:maps.remove(:__struct__, struct)
end
def equal?(map1, map2)
def equal?(%{} = map1, %{} = map2), do: map1 === map2
end
defmodule GenEvent.Behaviour do
@moduledoc """
This module is a convenience for defining GenEvent callbacks in Elixir.
GenEvent is an OTP behaviour that encapsulates event handling functionality.
## Example
Below is an example of a GenEvent that stores notifications
until they are fetched:
defmodule MyEventHandler do
use GenEvent.Behaviour
# Callbacks
def init(_) do
{ :ok, [] }
end
def handle_event({:notification, x}, notifications) do
{ :ok, [x|notifications] }
end
def handle_call(:notifications, notifications) do
{:ok, Enum.reverse(notifications), []}
end
end
{ :ok, pid } = :gen_event.start_link
#=> {:ok,#PID<0.42.0>}
:gen_event.add_handler(pid, MyEventHandler, [])
#=> :ok
:gen_event.notify(pid, {:notification, 1})
#=> :ok
:gen_event.notify(pid, {:notification, 2})
#=> :ok
:gen_event.call(pid, MyEventHandler, :notifications)
#=> [1, 2]
:gen_event.call(pid, MyEventHandler, :notifications)
#=> []
Notice we never call the server callbacks directly; they are called
by OTP whenever we interact with the server.
Starting and sending messages to the GenEvent is done
via Erlang's `:gen_event` module. For more information,
please refer to the following:
* http://www.erlang.org/doc/man/gen_event.html
* http://learnyousomeerlang.com/event-handlers
"""
@doc false
defmacro __using__(_) do
quote location: :keep do
@behaviour :gen_event
@doc false
def init(args) do
{ :ok, args }
end
@doc false
def handle_event(_event, state) do
{ :ok, state }
end
@doc false
def handle_call(_request, state) do
{ :ok, :ok, state }
end
@doc false
def handle_info(_msg, state) do
{ :ok, state }
end
@doc false
def terminate(_reason, _state) do
:ok
end
@doc false
def code_change(_old, state, _extra) do
{ :ok, state }
end
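# Each of these defaults only provides the minimal OTP contract; the
# defoverridable declaration below lets the using module replace any of
# them with its own implementation.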
defoverridable [init: 1,
handle_event: 2,
handle_call: 2, handle_info: 2,
terminate: 2, code_change: 3]
end
end
end
defmodule Seren.Player do
@moduledoc """
The Player context.
"""
import Ecto.Query, warn: false
alias Seren.Repo
alias Seren.Player.Track
@doc """
Returns the list of tracks.
## Examples
iex> list_tracks()
[%Track{}, ...]
"""
def list_tracks do
Repo.all(Track)
end
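# The variants below order results by artist name, album title, disc and
# track number, and title; the two-argument version additionally applies
# an offset for pagination.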
def list_tracks(limit) do
from(t in Track, join: artist in assoc(t, :artist), left_join: album in assoc(t, :album), limit: ^limit, order_by: [artist.name, album.title, :album_disc_number, :track_number, :title])
|> Repo.all
end
def list_tracks(limit, offset) do
from(t in Track, join: artist in assoc(t, :artist), left_join: album in assoc(t, :album), limit: ^limit, offset: ^offset, order_by: [artist.name, album.title, :album_disc_number, :track_number, :title])
|> Repo.all
end
@doc """
Returns the list of tracks for various models.
"""
def tracks_for_artist(id) do
from(t in Track, where: t.artist_id == ^id, left_join: album in assoc(t, :album), order_by: [album.title, :album_disc_number, :track_number, :title])
|> Repo.all
end
def tracks_for_genre(id) do
from(t in Track, join: artist in assoc(t, :artist), left_join: album in assoc(t, :album), where: t.genre_id == ^id, order_by: [artist.name, album.title, :album_disc_number, :track_number, :title])
|> Repo.all
end
def tracks_for_composer(id) do
from(t in Track, join: artist in assoc(t, :artist), left_join: album in assoc(t, :album), where: t.composer_id == ^id, order_by: [album.title, :album_disc_number, :track_number, artist.name, :title])
|> Repo.all
end
def tracks_for_album(id) do
from(t in Track, join: artist in assoc(t, :artist), where: t.album_id == ^id, order_by: [:album_disc_number, :track_number, artist.name, :title])
|> Repo.all
end
@doc """
Returns the list of tracks matching the given search query.
"""
def tracks_for_search(query, limit) do
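# Escape SQL LIKE/ILIKE wildcards in the user's query so they match
# literally, then wrap the result in `%` for a substring match.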
like_query = "%#{String.replace(query, "%", "\\%") |> String.replace("_", "\\_")}%"
from(t in Track, join: artist in assoc(t, :artist), left_join: album in assoc(t, :album), left_join: c in assoc(t, :composer), where: ilike(t.title, ^like_query) or ilike(artist.name, ^like_query) or ilike(album.title, ^like_query) or ilike(c.name, ^like_query), order_by: [artist.name, album.title, :album_disc_number, :track_number, :title], limit: ^limit)
|> Repo.all
end
@doc """
Gets a single track.
Raises `Ecto.NoResultsError` if the Track does not exist.
## Examples
iex> get_track!(123)
%Track{}
iex> get_track!(456)
** (Ecto.NoResultsError)
"""
def get_track!(id), do: Repo.get!(Track, id)
@doc """
Creates a track.
## Examples
iex> create_track(%{field: value})
{:ok, %Track{}}
iex> create_track(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_track(attrs \\ %{}) do
%Track{}
|> Track.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a track.
## Examples
iex> update_track(track, %{field: new_value})
{:ok, %Track{}}
iex> update_track(track, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_track(%Track{} = track, attrs) do
track
|> Track.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a Track.
## Examples
iex> delete_track(track)
{:ok, %Track{}}
iex> delete_track(track)
{:error, %Ecto.Changeset{}}
"""
def delete_track(%Track{} = track) do
Repo.delete(track)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking track changes.
## Examples
iex> change_track(track)
%Ecto.Changeset{source: %Track{}}
"""
def change_track(%Track{} = track) do
Track.changeset(track, %{})
end
alias Seren.Player.Artist
@doc """
Returns the list of artists.
## Examples
iex> list_artists()
[%Artist{}, ...]
"""
def list_artists do
from(Artist, order_by: :name)
|> Repo.all
end
@doc """
Gets a single artist.
Raises `Ecto.NoResultsError` if the Artist does not exist.
## Examples
iex> get_artist!(123)
%Artist{}
iex> get_artist!(456)
** (Ecto.NoResultsError)
"""
def get_artist!(id) do
Repo.get!(Artist, id)
end
@doc """
Creates an artist.
## Examples
iex> create_artist(%{field: value})
{:ok, %Artist{}}
iex> create_artist(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_artist(attrs \\ %{}) do
%Artist{}
|> Artist.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates an artist.
## Examples
iex> update_artist(artist, %{field: new_value})
{:ok, %Artist{}}
iex> update_artist(artist, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_artist(%Artist{} = artist, attrs) do
artist
|> Artist.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes an Artist.
## Examples
iex> delete_artist(artist)
{:ok, %Artist{}}
iex> delete_artist(artist)
{:error, %Ecto.Changeset{}}
"""
def delete_artist(%Artist{} = artist) do
Repo.delete(artist)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking artist changes.
## Examples
iex> change_artist(artist)
%Ecto.Changeset{source: %Artist{}}
"""
def change_artist(%Artist{} = artist) do
Artist.changeset(artist, %{})
end
alias Seren.Player.Genre
@doc """
Returns the list of genres.
## Examples
iex> list_genres()
[%Genre{}, ...]
"""
def list_genres do
from(Genre, order_by: :name)
|> Repo.all
end
@doc """
Gets a single genre.
Raises `Ecto.NoResultsError` if the Genre does not exist.
## Examples
iex> get_genre!(123)
%Genre{}
iex> get_genre!(456)
** (Ecto.NoResultsError)
"""
def get_genre!(id), do: Repo.get!(Genre, id)
@doc """
Creates a genre.
## Examples
iex> create_genre(%{field: value})
{:ok, %Genre{}}
iex> create_genre(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_genre(attrs \\ %{}) do
%Genre{}
|> Genre.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a genre.
## Examples
iex> update_genre(genre, %{field: new_value})
{:ok, %Genre{}}
iex> update_genre(genre, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_genre(%Genre{} = genre, attrs) do
genre
|> Genre.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a Genre.
## Examples
iex> delete_genre(genre)
{:ok, %Genre{}}
iex> delete_genre(genre)
{:error, %Ecto.Changeset{}}
"""
def delete_genre(%Genre{} = genre) do
Repo.delete(genre)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking genre changes.
## Examples
iex> change_genre(genre)
%Ecto.Changeset{source: %Genre{}}
"""
def change_genre(%Genre{} = genre) do
Genre.changeset(genre, %{})
end
alias Seren.Player.Composer
@doc """
Returns the list of composers.
## Examples
iex> list_composers()
[%Composer{}, ...]
"""
def list_composers do
from(Composer, order_by: :name)
|> Repo.all
end
@doc """
Gets a single composer.
Raises `Ecto.NoResultsError` if the Composer does not exist.
## Examples
iex> get_composer!(123)
%Composer{}
iex> get_composer!(456)
** (Ecto.NoResultsError)
"""
def get_composer!(id), do: Repo.get!(Composer, id)
@doc """
Creates a composer.
## Examples
iex> create_composer(%{field: value})
{:ok, %Composer{}}
iex> create_composer(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_composer(attrs \\ %{}) do
%Composer{}
|> Composer.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a composer.
## Examples
iex> update_composer(composer, %{field: new_value})
{:ok, %Composer{}}
iex> update_composer(composer, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_composer(%Composer{} = composer, attrs) do
composer
|> Composer.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a Composer.
## Examples
iex> delete_composer(composer)
{:ok, %Composer{}}
iex> delete_composer(composer)
{:error, %Ecto.Changeset{}}
"""
def delete_composer(%Composer{} = composer) do
Repo.delete(composer)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking composer changes.
## Examples
iex> change_composer(composer)
%Ecto.Changeset{source: %Composer{}}
"""
def change_composer(%Composer{} = composer) do
Composer.changeset(composer, %{})
end
alias Seren.Player.FileType
@doc """
Returns the list of file_types.
## Examples
iex> list_file_types()
[%FileType{}, ...]
"""
def list_file_types do
Repo.all(FileType)
end
@doc """
Gets a single file_type.
Raises `Ecto.NoResultsError` if the File type does not exist.
## Examples
iex> get_file_type!(123)
%FileType{}
iex> get_file_type!(456)
** (Ecto.NoResultsError)
"""
def get_file_type!(id), do: Repo.get!(FileType, id)
@doc """
Creates a file_type.
## Examples
iex> create_file_type(%{field: value})
{:ok, %FileType{}}
iex> create_file_type(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_file_type(attrs \\ %{}) do
%FileType{}
|> FileType.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a file_type.
## Examples
iex> update_file_type(file_type, %{field: new_value})
{:ok, %FileType{}}
iex> update_file_type(file_type, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_file_type(%FileType{} = file_type, attrs) do
file_type
|> FileType.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a FileType.
## Examples
iex> delete_file_type(file_type)
{:ok, %FileType{}}
iex> delete_file_type(file_type)
{:error, %Ecto.Changeset{}}
"""
def delete_file_type(%FileType{} = file_type) do
Repo.delete(file_type)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking file_type changes.
## Examples
iex> change_file_type(file_type)
%Ecto.Changeset{source: %FileType{}}
"""
def change_file_type(%FileType{} = file_type) do
FileType.changeset(file_type, %{})
end
alias Seren.Player.Album
@doc """
Returns the list of albums.
## Examples
iex> list_albums()
[%Album{}, ...]
"""
def list_albums do
Repo.all(Album)
end
@doc """
Gets a single album.
Raises `Ecto.NoResultsError` if the Album does not exist.
## Examples
iex> get_album!(123)
%Album{}
iex> get_album!(456)
** (Ecto.NoResultsError)
"""
def get_album!(id), do: Repo.get!(Album, id)
@doc """
Creates an album.
## Examples
iex> create_album(%{field: value})
{:ok, %Album{}}
iex> create_album(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_album(attrs \\ %{}) do
%Album{}
|> Album.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates an album.
## Examples
iex> update_album(album, %{field: new_value})
{:ok, %Album{}}
iex> update_album(album, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_album(%Album{} = album, attrs) do
album
|> Album.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes an Album.
## Examples
iex> delete_album(album)
{:ok, %Album{}}
iex> delete_album(album)
{:error, %Ecto.Changeset{}}
"""
def delete_album(%Album{} = album) do
Repo.delete(album)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking album changes.
## Examples
iex> change_album(album)
%Ecto.Changeset{source: %Album{}}
"""
def change_album(%Album{} = album) do
Album.changeset(album, %{})
end
end
defprotocol Phoenix.HTML.FormData do
@moduledoc """
Converts a data structure into a [`Phoenix.HTML.Form`](`t:Phoenix.HTML.Form.t/0`) struct.
"""
@doc """
Converts a data structure into a [`Phoenix.HTML.Form`](`t:Phoenix.HTML.Form.t/0`) struct.
The options are the same options given to `form_for/4`. They
can be used by implementations to configure their behaviour
and must be stored in the underlying struct, with any
custom fields removed.
"""
@spec to_form(t, Keyword.t()) :: Phoenix.HTML.Form.t()
def to_form(data, options)
@doc """
Converts the field in the given form based on the data structure
into a list of [`Phoenix.HTML.Form`](`t:Phoenix.HTML.Form.t/0`) structs.
The options are the same options given to `inputs_for/4`. They
can be used by implementations to configure their behaviour
and must be stored in the underlying struct, with any
custom fields removed.
"""
@spec to_form(t, Phoenix.HTML.Form.t(), Phoenix.HTML.Form.field(), Keyword.t()) ::
[Phoenix.HTML.Form.t()]
def to_form(data, form, field, options)
@doc """
Returns the value for the given field.
"""
@spec input_value(t, Phoenix.HTML.Form.t(), Phoenix.HTML.Form.field()) :: term
def input_value(data, form, field)
@doc """
Returns the HTML5 validations that would apply to
the given field.
"""
@spec input_validations(t, Phoenix.HTML.Form.t(), Phoenix.HTML.Form.field()) :: Keyword.t()
def input_validations(data, form, field)
@doc """
Receives the given field and returns its input type (:text_input,
:select, etc). Returns `nil` if the type is unknown.
"""
@spec input_type(t, Phoenix.HTML.Form.t(), Phoenix.HTML.Form.field()) :: atom | nil
def input_type(data, form, field)
end
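# A minimal usage sketch of the Plug.Conn implementation below (assuming a
# `conn` from a controller action whose params contain a "user" map):
#
#     form = Phoenix.HTML.FormData.to_form(conn, as: :user)
#     form.name   #=> "user"
#     form.params #=> conn.params["user"]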
defimpl Phoenix.HTML.FormData, for: [Plug.Conn, Atom] do
def to_form(conn_or_atom, opts) do
{name, params, opts} = name_params_and_opts(conn_or_atom, opts)
{errors, opts} = Keyword.pop(opts, :errors, [])
id = Keyword.get(opts, :id) || name
unless is_binary(id) or is_nil(id) do
raise ArgumentError, ":id option in form_for must be a binary/string, got: #{inspect(id)}"
end
%Phoenix.HTML.Form{
source: conn_or_atom,
impl: __MODULE__,
id: id,
name: name,
params: params,
data: %{},
errors: errors,
options: opts
}
end
case @for do
Atom ->
defp name_params_and_opts(atom, opts) do
{params, opts} = Keyword.pop(opts, :params, %{})
{Atom.to_string(atom), params, opts}
end
Plug.Conn ->
defp name_params_and_opts(conn, opts) do
case Keyword.pop(opts, :as) do
{nil, opts} ->
{nil, conn.params, opts}
{name, opts} ->
name = to_string(name)
{name, Map.get(conn.params, name) || %{}, opts}
end
end
end
def to_form(conn_or_atom, form, field, opts) when is_atom(field) or is_binary(field) do
{default, opts} = Keyword.pop(opts, :default, %{})
{prepend, opts} = Keyword.pop(opts, :prepend, [])
{append, opts} = Keyword.pop(opts, :append, [])
{name, opts} = Keyword.pop(opts, :as)
{id, opts} = Keyword.pop(opts, :id)
{hidden, opts} = Keyword.pop(opts, :hidden, [])
id = to_string(id || form.id <> "_#{field}")
name = to_string(name || form.name <> "[#{field}]")
params = Map.get(form.params, field_to_string(field))
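# A map default builds a single nested form (cardinality: one); a list
# default builds one form per entry (cardinality: many), taken from the
# submitted params when present, otherwise from prepend ++ default ++ append.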
cond do
# cardinality: one
is_map(default) ->
[
%Phoenix.HTML.Form{
source: conn_or_atom,
impl: __MODULE__,
id: id,
name: name,
data: default,
params: params || %{},
hidden: hidden,
options: opts
}
]
# cardinality: many
is_list(default) ->
entries =
if params do
params
|> Enum.sort_by(&elem(&1, 0))
|> Enum.map(&{nil, elem(&1, 1)})
else
Enum.map(prepend ++ default ++ append, &{&1, %{}})
end
for {{data, params}, index} <- Enum.with_index(entries) do
index_string = Integer.to_string(index)
%Phoenix.HTML.Form{
source: conn_or_atom,
impl: __MODULE__,
index: index,
id: id <> "_" <> index_string,
name: name <> "[" <> index_string <> "]",
data: data,
params: params,
hidden: hidden,
options: opts
}
end
end
end
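# Submitted params take precedence over the underlying data. Params are
# keyed by strings while the data may be keyed by atoms, hence the
# field_to_string/1 conversion below.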
def input_value(_conn_or_atom, %{data: data, params: params}, field)
when is_atom(field) or is_binary(field) do
key = field_to_string(field)
case params do
%{^key => value} -> value
%{} -> Map.get(data, field)
end
end
def input_type(_conn_or_atom, _form, _field), do: :text_input
def input_validations(_conn_or_atom, _form, _field), do: []
# Normalize field name to string version
defp field_to_string(field) when is_atom(field), do: Atom.to_string(field)
defp field_to_string(field) when is_binary(field), do: field
end
defmodule AshPostgres.DataLayer do
@manage_tenant %Ash.Dsl.Section{
name: :manage_tenant,
describe: """
Configuration for the behavior of a resource that manages a tenant
""",
examples: [
"""
manage_tenant do
template ["organization_", :id]
create? true
update? false
end
"""
],
schema: [
template: [
type: {:custom, __MODULE__, :tenant_template, []},
required: true,
doc: """
A template that will cause the resource to create/manage the specified schema.
Use this if you have a resource that, when created, it should create a new tenant
for you. For example, if you have a `customer` resource, and you want to create
a schema for each customer based on their id, e.g `customer_10` set this option
to `["customer_", :id]`. Then, when this is created, it will create a schema called
`["customer_", :id]`, and run your tenant migrations on it. Then, if you were to change
that customer's id to `20`, it would rename the schema to `customer_20`. Generally speaking
you should avoid changing the tenant id.
"""
],
create?: [
type: :boolean,
default: true,
doc: "Whether or not to automatically create a tenant when a record is created"
],
update?: [
type: :boolean,
default: true,
doc: "Whether or not to automatically update the tenant name if the record is udpated"
]
]
}
@reference %Ash.Dsl.Entity{
name: :reference,
describe: """
Configures the reference for a relationship in resource migrations.
Keep in mind that multiple relationships can theoretically involve the same destination and foreign keys.
In those cases, you only need to configure the `reference` behavior for one of them. Any conflicts will result
in an error, across this resource and any other resources that share a table with this one. For this reason,
instead of adding a reference configuration for `:nothing`, it's best to just leave the configuration out, as that
is the default behavior if *no* relationship anywhere has configured the behavior of that reference.
""",
examples: [
"reference :post, on_delete: :delete, on_update: :update, name: \"comments_to_posts_fkey\""
],
args: [:relationship],
target: AshPostgres.Reference,
schema: AshPostgres.Reference.schema()
}
@references %Ash.Dsl.Section{
name: :references,
describe: """
A section for configuring the references (foreign keys) in resource migrations.
This section is only relevant if you are using the migration generator with this resource.
Otherwise, it has no effect.
""",
examples: [
"""
references do
reference :post, on_delete: :delete, on_update: :update, name: "comments_to_posts_fkey"
end
"""
],
entities: [@reference],
schema: [
polymorphic_on_delete: [
type: {:one_of, [:delete, :nilify, :nothing, :restrict]},
doc:
"For polymorphic resources, configures the on_delete behavior of the automatically generated foreign keys to source tables."
],
polymorphic_on_update: [
type: {:one_of, [:update, :nilify, :nothing, :restrict]},
doc:
"For polymorphic resources, configures the on_update behavior of the automatically generated foreign keys to source tables."
],
polymorphic_name: [
type: :string,
doc:
"For polymorphic resources, configures the name of the automatically generated foreign keys to source tables."
]
]
}
@check_constraint %Ash.Dsl.Entity{
name: :check_constraint,
describe: """
Add a check constraint to be validated.
If a check constraint exists on the table but not in this section, and it produces an error, a runtime error will be raised.
Provide a list of attributes instead of a single attribute to add the message to multiple attributes.
By adding the `check` option, the migration generator will include it when generating migrations.
""",
examples: [
"""
check_constraint :price, "price_must_be_positive", check: "price > 0", message: "price must be positive"
"""
],
args: [:attribute, :name],
target: AshPostgres.CheckConstraint,
schema: AshPostgres.CheckConstraint.schema()
}
@check_constraints %Ash.Dsl.Section{
name: :check_constraints,
describe: """
A section for configuring the check constraints for a given table.
This can be used to automatically create those check constraints, or just to provide messages when they are raised
""",
examples: [
"""
check_constraints do
check_constraint :price, "price_must_be_positive", check: "price > 0", message: "price must be positive"
end
"""
],
entities: [@check_constraint]
}
@postgres %Ash.Dsl.Section{
name: :postgres,
describe: """
Postgres data layer configuration
""",
sections: [
@manage_tenant,
@references,
@check_constraints
],
modules: [
:repo
],
examples: [
"""
postgres do
repo MyApp.Repo
table "organizations"
end
"""
],
schema: [
repo: [
type: :atom,
required: true,
doc:
"The repo that will be used to fetch your data. See the `AshPostgres.Repo` documentation for more"
],
migrate?: [
type: :boolean,
default: true,
doc:
"Whether or not to include this resource in the generated migrations with `mix ash.generate_migrations`"
],
base_filter_sql: [
type: :string,
doc:
"A raw sql version of the base_filter, e.g `representative = true`. Required if trying to create a unique constraint on a resource with a base_filter"
],
skip_unique_indexes: [
type: {:custom, __MODULE__, :validate_skip_unique_indexes, []},
default: false,
doc: "Skip generating unique indexes when generating migrations"
],
unique_index_names: [
type: :any,
default: [],
doc: """
A list of unique index names that could raise errors, or an mfa to a function that takes a changeset
and returns the list. Must be in the format `{[:affected, :keys], "name_of_constraint"}` or `{[:affected, :keys], "name_of_constraint", "custom error message"}`
Note that this is *not* used to rename the unique indexes created from `identities`.
Use `identity_index_names` for that. This is used to tell ash_postgres about unique indexes that
exist in the database that it didn't create.
"""
],
identity_index_names: [
type: :any,
default: [],
doc: """
A keyword list of identity names to the unique index name that they should use when being managed by the migration
generator.
"""
],
foreign_key_names: [
type: :any,
default: [],
doc: """
A list of foreign keys that could raise errors, or an mfa to a function that takes a changeset and returns the list.
Must be in the format `{:key, "name_of_constraint"}` or `{:key, "name_of_constraint", "custom error message"}`
"""
],
table: [
type: :string,
doc:
"The table to store and read the resource from. Required unless `polymorphic?` is true."
],
polymorphic?: [
type: :boolean,
default: false,
doc: """
Declares this resource as polymorphic.
Polymorphic resources cannot be read or updated unless the table is provided in the query/changeset context.
For example:
PolymorphicResource
|> Ash.Query.set_context(%{data_layer: %{table: "table"}})
|> MyApi.read!()
When relating to polymorphic resources, you'll need to use the `context` option on relationships,
e.g
belongs_to :polymorphic_association, PolymorphicResource,
context: %{data_layer: %{table: "table"}}
"""
]
]
}
alias Ash.Filter
alias Ash.Query.{BooleanExpression, Not, Ref}
alias Ash.Query.Function.{Ago, Contains}
alias Ash.Query.Operator.IsNil
alias AshPostgres.Functions.{Fragment, TrigramSimilarity, Type}
import AshPostgres, only: [repo: 1]
@behaviour Ash.DataLayer
@sections [@postgres]
@moduledoc """
A postgres data layer that leverages Ecto's postgres capabilities.
# Table of Contents
#{Ash.Dsl.Extension.doc_index(@sections)}
#{Ash.Dsl.Extension.doc(@sections)}
"""
use Ash.Dsl.Extension,
sections: @sections,
transformers: [
AshPostgres.Transformers.VerifyRepo,
AshPostgres.Transformers.EnsureTableOrPolymorphic
]
@doc false
def tenant_template(value) do
value = List.wrap(value)
if Enum.all?(value, &(is_binary(&1) || is_atom(&1))) do
{:ok, value}
else
{:error, "Expected all values for `manages_tenant` to be strings or atoms"}
end
end
@doc false
def validate_skip_unique_indexes(indexes) do
indexes = List.wrap(indexes)
if Enum.all?(indexes, &is_atom/1) do
{:ok, indexes}
else
{:error, "All indexes to skip must be atoms"}
end
end
import Ecto.Query, only: [from: 2, subquery: 1]
@impl true
def can?(_, :async_engine), do: true
def can?(_, :transact), do: true
def can?(_, :composite_primary_key), do: true
def can?(_, :upsert), do: true
def can?(resource, {:join, other_resource}) do
data_layer = Ash.DataLayer.data_layer(resource)
other_data_layer = Ash.DataLayer.data_layer(other_resource)
data_layer == other_data_layer and repo(data_layer) == repo(other_data_layer)
end
def can?(resource, {:lateral_join, other_resource}) do
data_layer = Ash.DataLayer.data_layer(resource)
other_data_layer = Ash.DataLayer.data_layer(other_resource)
data_layer == other_data_layer and repo(data_layer) == repo(other_data_layer)
end
def can?(_, :boolean_filter), do: true
def can?(_, {:aggregate, :count}), do: true
def can?(_, {:aggregate, :sum}), do: true
def can?(_, :aggregate_filter), do: true
def can?(_, :aggregate_sort), do: true
def can?(_, :create), do: true
def can?(_, :select), do: true
def can?(_, :read), do: true
def can?(_, :update), do: true
def can?(_, :destroy), do: true
def can?(_, :filter), do: true
def can?(_, :limit), do: true
def can?(_, :offset), do: true
def can?(_, :multitenancy), do: true
def can?(_, {:filter_expr, _}), do: true
def can?(_, :nested_expressions), do: true
def can?(_, {:query_aggregate, :count}), do: true
def can?(_, :sort), do: true
def can?(_, :distinct), do: true
def can?(_, {:sort, _}), do: true
def can?(_, _), do: false
@impl true
def in_transaction?(resource) do
repo(resource).in_transaction?()
end
@impl true
def limit(query, nil, _), do: {:ok, query}
def limit(query, limit, _resource) do
{:ok, from(row in query, limit: ^limit)}
end
@impl true
def source(resource) do
AshPostgres.table(resource) || ""
end
@impl true
def set_context(resource, data_layer_query, context) do
if context[:data_layer][:table] do
{:ok,
%{
data_layer_query
| from: %{data_layer_query.from | source: {context[:data_layer][:table], resource}}
}}
else
{:ok, data_layer_query}
end
end
@impl true
def offset(query, nil, _), do: {:ok, query}
def offset(%{offset: old_offset} = query, 0, _resource) when old_offset in [0, nil] do
{:ok, query}
end
def offset(query, offset, _resource) do
{:ok, from(row in query, offset: ^offset)}
end
@impl true
def run_query(query, resource) do
if AshPostgres.polymorphic?(resource) && no_table?(query) do
raise_table_error!(resource, :read)
else
{:ok, repo(resource).all(query, repo_opts(query))}
end
end
defp no_table?(%{from: %{source: {"", _}}}), do: true
defp no_table?(_), do: false
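# For context-based multitenancy, Repo calls are scoped to the tenant's
# schema via Ecto's :prefix option; other strategies need no prefix.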
defp repo_opts(%Ash.Changeset{tenant: tenant, resource: resource}) do
repo_opts(%{tenant: tenant, resource: resource})
end
defp repo_opts(%{tenant: tenant, resource: resource}) when not is_nil(tenant) do
if Ash.Resource.Info.multitenancy_strategy(resource) == :context do
[prefix: tenant]
else
[]
end
end
defp repo_opts(_), do: []
@impl true
def functions(resource) do
config = repo(resource).config()
functions = [AshPostgres.Functions.Type, AshPostgres.Functions.Fragment]
if "pg_trgm" in (config[:installed_extensions] || []) do
functions ++
[
AshPostgres.Functions.TrigramSimilarity
]
else
functions
end
end
@impl true
def run_aggregate_query(query, aggregates, resource) do
subquery = from(row in subquery(query), select: %{})
query =
Enum.reduce(
aggregates,
subquery,
&add_subquery_aggregate_select(&2, &1, resource)
)
{:ok, repo(resource).one(query, repo_opts(query))}
end
@impl true
def set_tenant(_resource, query, tenant) do
{:ok, Ecto.Query.put_query_prefix(query, to_string(tenant))}
end
@impl true
def run_aggregate_query_with_lateral_join(
query,
aggregates,
root_data,
source_resource,
destination_resource,
source_field,
destination_field
) do
lateral_join_query =
lateral_join_query(
query,
root_data,
source_resource,
source_field,
destination_field
)
subquery = from(row in subquery(lateral_join_query), select: %{})
query =
Enum.reduce(
aggregates,
subquery,
&add_subquery_aggregate_select(&2, &1, destination_resource)
)
{:ok, repo(source_resource).one(query, repo_opts(query))}
end
@impl true
def run_query_with_lateral_join(
query,
root_data,
source_resource,
_destination_resource,
source_field,
destination_field
) do
query =
lateral_join_query(
query,
root_data,
source_resource,
source_field,
destination_field
)
{:ok, repo(source_resource).all(query, repo_opts(query))}
end
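# Correlates the destination query to each source record with a lateral
# join, so any limit/offset/sort on `query` applies per source record
# rather than across the whole result set.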
defp lateral_join_query(
query,
root_data,
source_resource,
source_field,
destination_field
) do
source_values = Enum.map(root_data, &Map.get(&1, source_field))
subquery =
subquery(
from(destination in query,
where:
field(destination, ^destination_field) ==
field(parent_as(:source_record), ^source_field)
)
)
source_resource
|> Ash.Query.new()
|> Ash.Query.data_layer_query()
|> case do
{:ok, data_layer_query} ->
from(source in data_layer_query,
as: :source_record,
where: field(source, ^source_field) in ^source_values,
inner_lateral_join: destination in ^subquery,
on: field(source, ^source_field) == field(destination, ^destination_field),
select: destination
)
{:error, error} ->
{:error, error}
end
end
@impl true
def resource_to_query(resource, _),
do: Ecto.Queryable.to_query({AshPostgres.table(resource) || "", resource})
@impl true
def create(resource, changeset) do
changeset.data
|> Map.update!(:__meta__, &Map.put(&1, :source, table(resource, changeset)))
|> ecto_changeset(changeset, :create)
|> repo(resource).insert(repo_opts(changeset))
|> handle_errors()
|> case do
{:ok, result} ->
maybe_create_tenant!(resource, result)
{:ok, result}
{:error, error} ->
{:error, error}
end
end
defp maybe_create_tenant!(resource, result) do
if AshPostgres.manage_tenant_create?(resource) do
tenant_name = tenant_name(resource, result)
AshPostgres.MultiTenancy.create_tenant!(tenant_name, repo(resource))
else
:ok
end
end
defp maybe_update_tenant(resource, changeset, result) do
if AshPostgres.manage_tenant_update?(resource) do
changing_tenant_name? =
resource
|> AshPostgres.manage_tenant_template()
|> Enum.filter(&is_atom/1)
|> Enum.any?(&Ash.Changeset.changing_attribute?(changeset, &1))
if changing_tenant_name? do
old_tenant_name = tenant_name(resource, changeset.data)
new_tenant_name = tenant_name(resource, result)
AshPostgres.MultiTenancy.rename_tenant(repo(resource), old_tenant_name, new_tenant_name)
end
end
:ok
end
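# Builds the tenant's schema name from the configured template: binary
# segments are kept as-is and atom segments are replaced by the value of
# that field on the record, e.g. ["customer_", :id] with id 10 yields
# "customer_10".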
defp tenant_name(resource, result) do
resource
|> AshPostgres.manage_tenant_template()
|> Enum.map_join(fn item ->
if is_binary(item) do
item
else
result
|> Map.get(item)
|> to_string()
end
end)
end
defp handle_errors({:error, %Ecto.Changeset{errors: errors}}) do
{:error, Enum.map(errors, &to_ash_error/1)}
end
defp handle_errors({:ok, val}), do: {:ok, val}
defp to_ash_error({field, {message, vars}}) do
Ash.Error.Changes.InvalidAttribute.exception(field: field, message: message, vars: vars)
end
defp ecto_changeset(record, changeset, type) do
ecto_changeset =
record
|> set_table(changeset, type)
|> Ecto.Changeset.change(changeset.attributes)
|> add_configured_foreign_key_constraints(record.__struct__)
|> add_unique_indexes(record.__struct__, changeset)
|> add_check_constraints(record.__struct__)
case type do
:create ->
ecto_changeset
|> add_my_foreign_key_constraints(record.__struct__)
type when type in [:upsert, :update] ->
ecto_changeset
|> add_my_foreign_key_constraints(record.__struct__)
|> add_related_foreign_key_constraints(record.__struct__)
:delete ->
ecto_changeset
|> add_related_foreign_key_constraints(record.__struct__)
end
end
defp set_table(record, changeset, operation) do
if AshPostgres.polymorphic?(record.__struct__) do
table = changeset.context[:data_layer][:table] || AshPostgres.table(record.__struct__)
if table do
Ecto.put_meta(record, source: table)
else
raise_table_error!(changeset.resource, operation)
end
else
record
end
end
defp add_check_constraints(changeset, resource) do
resource
|> AshPostgres.check_constraints()
|> Enum.reduce(changeset, fn constraint, changeset ->
constraint.attribute
|> List.wrap()
|> Enum.reduce(changeset, fn attribute, changeset ->
Ecto.Changeset.check_constraint(changeset, attribute,
name: constraint.name,
message: constraint.message || "is invalid"
)
end)
end)
end
defp add_related_foreign_key_constraints(changeset, resource) do
# TODO: this doesn't guarantee that we get all of them, because if something is related to this
# schema and there is no back-relation, then this won't catch its foreign key constraints
resource
|> Ash.Resource.Info.relationships()
|> Enum.map(& &1.destination)
|> Enum.uniq()
|> Enum.flat_map(fn related ->
related
|> Ash.Resource.Info.relationships()
|> Enum.filter(&(&1.destination == resource))
|> Enum.map(&Map.take(&1, [:source, :source_field, :destination_field]))
end)
|> Enum.uniq()
|> Enum.reduce(changeset, fn %{
source: source,
source_field: source_field,
destination_field: destination_field
},
changeset ->
Ecto.Changeset.foreign_key_constraint(changeset, destination_field,
name: "#{AshPostgres.table(source)}_#{source_field}_fkey",
message: "would leave records behind"
)
end)
end
defp add_my_foreign_key_constraints(changeset, resource) do
resource
|> Ash.Resource.Info.relationships()
|> Enum.reduce(changeset, &Ecto.Changeset.foreign_key_constraint(&2, &1.source_field))
end
defp add_configured_foreign_key_constraints(changeset, resource) do
resource
|> AshPostgres.foreign_key_names()
|> case do
{m, f, a} -> List.wrap(apply(m, f, [changeset | a]))
value -> List.wrap(value)
end
|> Enum.reduce(changeset, fn
{key, name}, changeset ->
Ecto.Changeset.foreign_key_constraint(changeset, key, name: name)
{key, name, message}, changeset ->
Ecto.Changeset.foreign_key_constraint(changeset, key, name: name, message: message)
end)
end
defp add_unique_indexes(changeset, resource, ash_changeset) do
changeset =
resource
|> Ash.Resource.Info.identities()
|> Enum.reduce(changeset, fn identity, changeset ->
name =
AshPostgres.identity_index_names(resource)[identity.name] ||
"#{table(resource, ash_changeset)}_#{identity.name}_index"
opts =
if Map.get(identity, :message) do
[name: name, message: identity.message]
else
[name: name]
end
Ecto.Changeset.unique_constraint(changeset, identity.keys, opts)
end)
names =
resource
|> AshPostgres.unique_index_names()
|> case do
{m, f, a} -> List.wrap(apply(m, f, [changeset | a]))
value -> List.wrap(value)
end
names = [
{Ash.Resource.Info.primary_key(resource), table(resource, ash_changeset) <> "_pkey"} | names
]
Enum.reduce(names, changeset, fn
{keys, name}, changeset ->
Ecto.Changeset.unique_constraint(changeset, List.wrap(keys), name: name)
{keys, name, message}, changeset ->
Ecto.Changeset.unique_constraint(changeset, List.wrap(keys), name: name, message: message)
end)
end
@impl true
def upsert(resource, changeset) do
repo_opts =
changeset
|> repo_opts()
|> Keyword.put(:on_conflict, {:replace, Map.keys(changeset.attributes)})
|> Keyword.put(:conflict_target, Ash.Resource.Info.primary_key(resource))
if AshPostgres.manage_tenant_update?(resource) do
{:error, "Cannot currently upsert a resource that owns a tenant"}
else
changeset.data
|> Map.update!(:__meta__, &Map.put(&1, :source, table(resource, changeset)))
|> ecto_changeset(changeset, :upsert)
|> repo(resource).insert(repo_opts)
|> handle_errors()
end
end
@impl true
def update(resource, changeset) do
changeset.data
|> Map.update!(:__meta__, &Map.put(&1, :source, table(resource, changeset)))
|> ecto_changeset(changeset, :update)
|> repo(resource).update(repo_opts(changeset))
|> handle_errors()
|> case do
{:ok, result} ->
maybe_update_tenant(resource, changeset, result)
{:ok, result}
{:error, error} ->
{:error, error}
end
end
@impl true
def destroy(resource, %{data: record} = changeset) do
record
|> ecto_changeset(changeset, :delete)
|> repo(resource).delete(repo_opts(changeset))
|> case do
{:ok, _record} ->
:ok
{:error, error} ->
handle_errors({:error, error})
end
end
@impl true
def sort(query, sort, resource) do
query = default_bindings(query, resource)
sort
|> sanitize_sort()
|> Enum.reduce({:ok, query}, fn {order, sort}, {:ok, query} ->
binding =
case Map.fetch(query.__ash_bindings__.aggregates, sort) do
{:ok, binding} ->
binding
:error ->
0
end
new_query =
Map.update!(query, :order_bys, fn order_bys ->
order_bys = order_bys || []
sort_expr = %Ecto.Query.QueryExpr{
expr: [
{order, {{:., [], [{:&, [], [binding]}, sort]}, [], []}}
]
}
order_bys ++ [sort_expr]
end)
{:ok, new_query}
end)
end
@impl true
def select(query, select, resource) do
query = default_bindings(query, resource)
{:ok,
from(row in query,
select: struct(row, ^select)
)}
end
@impl true
def distinct(query, distinct_on, resource) do
query = default_bindings(query, resource)
query =
query
|> default_bindings(resource)
|> Map.update!(:distinct, fn distinct ->
distinct =
distinct ||
%Ecto.Query.QueryExpr{
expr: []
}
expr =
Enum.map(distinct_on, fn distinct_on_field ->
binding =
case Map.fetch(query.__ash_bindings__.aggregates, distinct_on_field) do
{:ok, binding} ->
binding
:error ->
0
end
{:asc, {{:., [], [{:&, [], [binding]}, distinct_on_field]}, [], []}}
end)
%{distinct | expr: distinct.expr ++ expr}
end)
{:ok, query}
end
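# Normalizes Ash sort specs into Ecto's {order, field} shape, e.g.
# {:name, :asc_nils_last} becomes {:asc_nulls_last, :name}.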
defp sanitize_sort(sort) do
sort
|> List.wrap()
|> Enum.map(fn
{sort, :asc_nils_last} -> {:asc_nulls_last, sort}
{sort, :asc_nils_first} -> {:asc_nulls_first, sort}
{sort, :desc_nils_last} -> {:desc_nulls_last, sort}
{sort, :desc_nils_first} -> {:desc_nulls_first, sort}
{sort, order} -> {order, sort}
sort -> sort
end)
end
@impl true
def filter(query, %{expression: false}, _resource) do
impossible_query = from(row in query, where: false)
{:ok, Map.put(impossible_query, :__impossible__, true)}
end
def filter(query, filter, _resource) do
relationship_paths =
filter
|> Filter.relationship_paths()
|> Enum.map(fn path ->
if can_inner_join?(path, filter) do
{:inner, relationship_path_to_relationships(filter.resource, path)}
else
{:left, relationship_path_to_relationships(filter.resource, path)}
end
end)
new_query =
query
|> join_all_relationships(relationship_paths)
|> add_filter_expression(filter)
{:ok, new_query}
end
defp default_bindings(query, resource) do
Map.put_new(query, :__ash_bindings__, %{
current: Enum.count(query.joins) + 1,
aggregates: %{},
bindings: %{0 => %{path: [], type: :root, source: resource}}
})
end
@known_inner_join_operators [
Eq,
GreaterThan,
GreaterThanOrEqual,
In,
LessThanOrEqual,
LessThan,
NotEq
]
|> Enum.map(&Module.concat(Ash.Query.Operator, &1))
@known_inner_join_functions [
Ago,
Contains
]
|> Enum.map(&Module.concat(Ash.Query.Function, &1))
@known_inner_join_predicates @known_inner_join_functions ++ @known_inner_join_operators
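# A relationship path may be inner joined only when a filter on that path
# must hold for the row to match at all. Once the path appears under an
# `or` (or under a `not`), rows with no related records could still satisfy
# the filter, so a left join is required.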
# For consistency's sake, this logic was removed.
# We can revisit it sometime though.
defp can_inner_join?(path, expr, seen_an_or? \\ false)
defp can_inner_join?(path, %{expression: expr}, seen_an_or?),
do: can_inner_join?(path, expr, seen_an_or?)
defp can_inner_join?(_path, expr, _seen_an_or?) when expr in [nil, true, false], do: true
defp can_inner_join?(path, %BooleanExpression{op: :and, left: left, right: right}, seen_an_or?) do
can_inner_join?(path, left, seen_an_or?) || can_inner_join?(path, right, seen_an_or?)
end
defp can_inner_join?(path, %BooleanExpression{op: :or, left: left, right: right}, _) do
can_inner_join?(path, left, true) && can_inner_join?(path, right, true)
end
defp can_inner_join?(
_,
%Not{},
_
) do
false
end
defp can_inner_join?(
search_path,
%struct{__operator__?: true, left: %Ref{relationship_path: relationship_path}},
seen_an_or?
)
when search_path == relationship_path and struct in @known_inner_join_predicates do
not seen_an_or?
end
defp can_inner_join?(
search_path,
%struct{__operator__?: true, right: %Ref{relationship_path: relationship_path}},
seen_an_or?
)
when search_path == relationship_path and struct in @known_inner_join_predicates do
not seen_an_or?
end
defp can_inner_join?(
search_path,
%struct{__function__?: true, arguments: arguments},
seen_an_or?
)
when struct in @known_inner_join_predicates do
if Enum.any?(arguments, &match?(%Ref{relationship_path: ^search_path}, &1)) do
not seen_an_or?
else
true
end
end
defp can_inner_join?(_, _, _), do: false
@impl true
def add_aggregate(query, aggregate, _resource) do
resource = aggregate.resource
query = default_bindings(query, resource)
{query, binding} =
case get_binding(resource, aggregate.relationship_path, query, :aggregate) do
nil ->
relationship = Ash.Resource.Info.relationship(resource, aggregate.relationship_path)
subquery = aggregate_subquery(relationship, aggregate)
new_query =
join_all_relationships(
query,
[
{{:aggregate, aggregate.name, subquery},
relationship_path_to_relationships(resource, aggregate.relationship_path)}
]
)
{new_query, get_binding(resource, aggregate.relationship_path, new_query, :aggregate)}
binding ->
{query, binding}
end
query_with_aggregate_binding =
put_in(
query.__ash_bindings__.aggregates,
Map.put(query.__ash_bindings__.aggregates, aggregate.name, binding)
)
new_query =
query_with_aggregate_binding
|> add_aggregate_to_subquery(resource, aggregate, binding)
|> select_aggregate(resource, aggregate)
{:ok, new_query}
end
defp select_aggregate(query, resource, aggregate) do
binding = get_binding(resource, aggregate.relationship_path, query, :aggregate)
query =
if query.select do
query
else
from(row in query,
select: row,
select_merge: %{aggregates: %{}}
)
end
%{query | select: add_to_select(query.select, binding, aggregate)}
end
defp add_to_select(
%{expr: {:merge, _, [first, {:%{}, _, [{:aggregates, {:%{}, [], fields}}]}]}} = select,
binding,
%{load: nil} = aggregate
) do
accessed = {{:., [], [{:&, [], [binding]}, aggregate.name]}, [], []}
field =
{:type, [],
[
accessed,
Ash.Type.ecto_type(aggregate.type)
]}
field_with_default =
if is_nil(aggregate.default_value) do
field
else
{:coalesce, [],
[
field,
{:type, [],
[
aggregate.default_value,
Ash.Type.ecto_type(aggregate.type)
]}
]}
end
new_fields = [
{aggregate.name, field_with_default}
| fields
]
%{select | expr: {:merge, [], [first, {:%{}, [], [{:aggregates, {:%{}, [], new_fields}}]}]}}
end
defp add_to_select(
%{expr: expr} = select,
binding,
%{load: load_as} = aggregate
) do
accessed = {{:., [], [{:&, [], [binding]}, aggregate.name]}, [], []}
field =
{:type, [],
[
accessed,
Ash.Type.ecto_type(aggregate.type)
]}
field_with_default =
if is_nil(aggregate.default_value) do
field
else
{:coalesce, [],
[
field,
{:type, [],
[
aggregate.default_value,
Ash.Type.ecto_type(aggregate.type)
]}
]}
end
%{select | expr: {:merge, [], [expr, {:%{}, [], [{load_as, field_with_default}]}]}}
end
defp add_aggregate_to_subquery(query, resource, aggregate, binding) do
new_joins =
List.update_at(query.joins, binding - 1, fn join ->
aggregate_query =
if aggregate.authorization_filter do
{:ok, filter} =
filter(
join.source.from.source.query,
aggregate.authorization_filter,
Ash.Resource.Info.related(resource, aggregate.relationship_path)
)
filter
else
join.source.from.source.query
end
new_aggregate_query = add_subquery_aggregate_select(aggregate_query, aggregate, resource)
put_in(join.source.from.source.query, new_aggregate_query)
end)
%{
query
| joins: new_joins
}
end
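# Groups the destination rows by the relationship's destination_field so
# that the subquery yields one aggregate row per source record.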
defp aggregate_subquery(relationship, aggregate) do
query =
from(row in relationship.destination,
group_by: ^relationship.destination_field,
select: field(row, ^relationship.destination_field)
)
if aggregate.query && aggregate.query.tenant do
Ecto.Query.put_query_prefix(query, aggregate.query.tenant)
else
query
end
end
defp order_to_postgres_order(dir) do
case dir do
:asc -> nil
:asc_nils_last -> " ASC NULLS LAST"
:asc_nils_first -> " ASC NULLS FIRST"
:desc -> " DESC"
:desc_nils_last -> " DESC NULLS LAST"
:desc_nils_first -> " DESC NULLS FIRST"
end
end
defp add_subquery_aggregate_select(query, %{kind: :first} = aggregate, _resource) do
query = default_bindings(query, aggregate.resource)
key = aggregate.field
type = Ash.Type.ecto_type(aggregate.type)
field =
if aggregate.query && aggregate.query.sort && aggregate.query.sort != [] do
sort_expr =
aggregate.query.sort
|> Enum.map(fn {sort, order} ->
case order_to_postgres_order(order) do
nil ->
[expr: {{:., [], [{:&, [], [0]}, sort]}, [], []}]
order ->
[expr: {{:., [], [{:&, [], [0]}, sort]}, [], []}, raw: order]
end
end)
|> Enum.intersperse(raw: ", ")
|> List.flatten()
{:fragment, [],
[
raw: "array_agg(",
expr: {{:., [], [{:&, [], [0]}, key]}, [], []},
raw: " ORDER BY "
] ++
sort_expr ++ [raw: ")"]}
else
{:fragment, [],
[
raw: "array_agg(",
expr: {{:., [], [{:&, [], [0]}, key]}, [], []},
raw: ")"
]}
end
{params, filtered} =
if aggregate.query && aggregate.query.filter &&
not match?(%Ash.Filter{expression: nil}, aggregate.query.filter) do
{params, expr} =
filter_to_expr(
aggregate.query.filter,
query.__ash_bindings__.bindings,
query.select.params
)
{params, {:filter, [], [field, expr]}}
else
{[], field}
end
casted =
{:type, [],
[
{:fragment, [],
[
raw: "(",
expr: filtered,
raw: ")[1]"
]},
type
]}
new_expr = {:merge, [], [query.select.expr, {:%{}, [], [{aggregate.name, casted}]}]}
%{query | select: %{query.select | expr: new_expr, params: params}}
end
defp add_subquery_aggregate_select(query, %{kind: :list} = aggregate, _resource) do
query = default_bindings(query, aggregate.resource)
key = aggregate.field
type = Ash.Type.ecto_type(aggregate.type)
field =
if aggregate.query && aggregate.query.sort && aggregate.query.sort != [] do
sort_expr =
aggregate.query.sort
|> Enum.map(fn {sort, order} ->
case order_to_postgres_order(order) do
nil ->
[expr: {{:., [], [{:&, [], [0]}, sort]}, [], []}]
order ->
[expr: {{:., [], [{:&, [], [0]}, sort]}, [], []}, raw: order]
end
end)
|> Enum.intersperse(raw: ", ")
|> List.flatten()
{:fragment, [],
[
raw: "array_agg(",
expr: {{:., [], [{:&, [], [0]}, key]}, [], []},
raw: " ORDER BY "
] ++
sort_expr ++ [raw: ")"]}
else
{:fragment, [],
[
raw: "array_agg(",
expr: {{:., [], [{:&, [], [0]}, key]}, [], []},
raw: ")"
]}
end
{params, filtered} =
if aggregate.query && aggregate.query.filter &&
not match?(%Ash.Filter{expression: nil}, aggregate.query.filter) do
{params, expr} =
filter_to_expr(
aggregate.query.filter,
query.__ash_bindings__.bindings,
query.select.params
)
{params, {:filter, [], [field, expr]}}
else
{[], field}
end
cast = {:type, [], [filtered, {:array, type}]}
new_expr = {:merge, [], [query.select.expr, {:%{}, [], [{aggregate.name, cast}]}]}
%{query | select: %{query.select | expr: new_expr, params: params}}
end
defp add_subquery_aggregate_select(query, %{kind: kind} = aggregate, resource)
when kind in [:count, :sum] do
query = default_bindings(query, aggregate.resource)
key = aggregate.field || List.first(Ash.Resource.Info.primary_key(resource))
type = Ash.Type.ecto_type(aggregate.type)
field = {kind, [], [{{:., [], [{:&, [], [0]}, key]}, [], []}]}
{params, filtered} =
if aggregate.query && aggregate.query.filter &&
not match?(%Ash.Filter{expression: nil}, aggregate.query.filter) do
{params, expr} =
filter_to_expr(
aggregate.query.filter,
query.__ash_bindings__.bindings,
query.select.params
)
{params, {:filter, [], [field, expr]}}
else
{[], field}
end
cast = {:type, [], [filtered, type]}
new_expr = {:merge, [], [query.select.expr, {:%{}, [], [{aggregate.name, cast}]}]}
%{query | select: %{query.select | expr: new_expr, params: params}}
end
defp relationship_path_to_relationships(resource, path, acc \\ [])
defp relationship_path_to_relationships(_resource, [], acc), do: Enum.reverse(acc)
defp relationship_path_to_relationships(resource, [relationship | rest], acc) do
relationship = Ash.Resource.Info.relationship(resource, relationship)
relationship_path_to_relationships(relationship.destination, rest, [relationship | acc])
end
defp join_all_relationships(query, relationship_paths, path \\ [], source \\ nil) do
query = default_bindings(query, source)
Enum.reduce(relationship_paths, query, fn
{_join_type, []}, query ->
query
{join_type, [relationship | rest_rels]}, query ->
source = source || relationship.source
current_path = path ++ [relationship]
current_join_type =
case join_type do
{:aggregate, _name, _agg} when rest_rels != [] ->
:left
other ->
other
end
if has_binding?(source, Enum.reverse(current_path), query, current_join_type) do
query
else
joined_query =
join_relationship(
query,
relationship,
Enum.map(path, & &1.name),
current_join_type,
source
)
joined_query_with_distinct = add_distinct(relationship, join_type, joined_query)
join_all_relationships(
joined_query_with_distinct,
[{join_type, rest_rels}],
current_path,
source
)
end
end)
end
defp has_binding?(resource, path, query, {:aggregate, _, _}),
do: has_binding?(resource, path, query, :aggregate)
defp has_binding?(resource, candidate_path, %{__ash_bindings__: _} = query, type) do
Enum.any?(query.__ash_bindings__.bindings, fn
{_, %{path: path, source: source, type: ^type}} ->
Ash.SatSolver.synonymous_relationship_paths?(resource, path, candidate_path, source)
_ ->
false
end)
end
defp has_binding?(_, _, _, _), do: false
defp get_binding(resource, path, %{__ash_bindings__: _} = query, type) do
paths =
Enum.flat_map(query.__ash_bindings__.bindings, fn
{binding, %{path: path, type: ^type}} ->
[{binding, path}]
_ ->
[]
end)
Enum.find_value(paths, fn {binding, candidate_path} ->
Ash.SatSolver.synonymous_relationship_paths?(resource, candidate_path, path) && binding
end)
end
defp get_binding(_, _, _, _), do: nil
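# A left join to a to-many relationship can multiply source rows, so add a
# distinct clause to deduplicate the results.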
defp add_distinct(relationship, join_type, joined_query) do
if relationship.cardinality == :many and join_type == :left && !joined_query.distinct do
from(row in joined_query,
distinct: ^Ash.Resource.Info.primary_key(relationship.destination)
)
else
joined_query
end
end
defp join_relationship(query, relationship, path, join_type, source) do
case Map.get(query.__ash_bindings__.bindings, path) do
%{type: existing_join_type} when join_type != existing_join_type ->
raise "unreachable?"
nil ->
do_join_relationship(query, relationship, path, join_type, source)
_ ->
query
end
end
defp do_join_relationship(query, %{type: :many_to_many} = relationship, path, kind, source) do
relationship_through = maybe_get_resource_query(relationship.through)
relationship_destination =
Ecto.Queryable.to_query(maybe_get_resource_query(relationship.destination))
current_binding =
Enum.find_value(query.__ash_bindings__.bindings, 0, fn {binding, data} ->
if data.type == kind && data.path == Enum.reverse(path) do
binding
end
end)
new_query =
case kind do
{:aggregate, _, subquery} ->
subquery =
subquery(
from(destination in subquery,
where:
field(destination, ^relationship.destination_field) ==
field(
parent_as(:rel_through),
^relationship.destination_field_on_join_table
)
)
)
from([{row, current_binding}] in query,
left_join: through in ^relationship_through,
as: :rel_through,
on:
field(row, ^relationship.source_field) ==
field(through, ^relationship.source_field_on_join_table),
left_lateral_join: destination in ^subquery,
on:
field(destination, ^relationship.destination_field) ==
field(through, ^relationship.destination_field_on_join_table)
)
:inner ->
from([{row, current_binding}] in query,
join: through in ^relationship_through,
on:
field(row, ^relationship.source_field) ==
field(through, ^relationship.source_field_on_join_table),
join: destination in ^relationship_destination,
on:
field(destination, ^relationship.destination_field) ==
field(through, ^relationship.destination_field_on_join_table)
)
_ ->
from([{row, current_binding}] in query,
left_join: through in ^relationship_through,
on:
field(row, ^relationship.source_field) ==
field(through, ^relationship.source_field_on_join_table),
left_join: destination in ^relationship_destination,
on:
field(destination, ^relationship.destination_field) ==
field(through, ^relationship.destination_field_on_join_table)
)
end
join_path =
Enum.reverse([String.to_existing_atom(to_string(relationship.name) <> "_join_assoc") | path])
full_path = Enum.reverse([relationship.name | path])
binding_data =
case kind do
{:aggregate, name, _agg} ->
%{type: :aggregate, name: name, path: full_path, source: source}
_ ->
%{type: kind, path: full_path, source: source}
end
new_query
|> add_binding(%{path: join_path, type: :left, source: source})
|> add_binding(binding_data)
end
defp do_join_relationship(query, relationship, path, kind, source) do
relationship_destination =
Ecto.Queryable.to_query(maybe_get_resource_query(relationship.destination))
current_binding =
Enum.find_value(query.__ash_bindings__.bindings, 0, fn {binding, data} ->
if data.type == kind && data.path == Enum.reverse(path) do
binding
end
end)
new_query =
case kind do
{:aggregate, _, subquery} ->
subquery =
from(
sub in subquery(
from(destination in subquery,
where:
field(destination, ^relationship.destination_field) ==
field(parent_as(:rel_source), ^relationship.source_field)
)
),
select: field(sub, ^relationship.destination_field)
)
from([{row, current_binding}] in query,
as: :rel_source,
left_lateral_join: destination in ^subquery,
on:
field(row, ^relationship.source_field) ==
field(destination, ^relationship.destination_field)
)
:inner ->
from([{row, current_binding}] in query,
join: destination in ^relationship_destination,
on:
field(row, ^relationship.source_field) ==
field(destination, ^relationship.destination_field)
)
_ ->
from([{row, current_binding}] in query,
left_join: destination in ^relationship_destination,
on:
field(row, ^relationship.source_field) ==
field(destination, ^relationship.destination_field)
)
end
full_path = Enum.reverse([relationship.name | path])
binding_data =
case kind do
{:aggregate, name, _agg} ->
%{type: :aggregate, name: name, path: full_path, source: source}
_ ->
%{type: kind, path: full_path, source: source}
end
new_query
|> add_binding(binding_data)
end
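# Splits the filter into its top-level AND clauses and appends each one to
# the query as a separate `where` (`Ecto.Query.BooleanExpr`) entry.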
defp add_filter_expression(query, filter) do
wheres =
filter
|> split_and_statements()
|> Enum.map(fn filter ->
{params, expr} = filter_to_expr(filter, query.__ash_bindings__.bindings, [])
%Ecto.Query.BooleanExpr{
expr: expr,
op: :and,
params: params
}
end)
%{query | wheres: query.wheres ++ wheres}
end
defp split_and_statements(%Filter{expression: expression}) do
split_and_statements(expression)
end
defp split_and_statements(%BooleanExpression{op: :and, left: left, right: right}) do
split_and_statements(left) ++ split_and_statements(right)
end
defp split_and_statements(%Not{expression: %Not{expression: expression}}) do
split_and_statements(expression)
end
defp split_and_statements(%Not{
expression: %BooleanExpression{op: :or, left: left, right: right}
}) do
split_and_statements(%BooleanExpression{
op: :and,
left: %Not{expression: left},
right: %Not{expression: right}
})
end
defp split_and_statements(other), do: [other]
defp filter_to_expr(expr, bindings, params, embedded? \\ false, type \\ nil)
defp filter_to_expr(%Filter{expression: expression}, bindings, params, embedded?, type) do
filter_to_expr(expression, bindings, params, embedded?, type)
end
# A nil filter means "everything"
defp filter_to_expr(nil, _, _, _, _), do: {[], true}
# A true filter means "everything"
defp filter_to_expr(true, _, _, _, _), do: {[], true}
# A false filter means "nothing"
defp filter_to_expr(false, _, _, _, _), do: {[], false}
defp filter_to_expr(expression, bindings, params, embedded?, type) do
do_filter_to_expr(expression, bindings, params, embedded?, type)
end
defp do_filter_to_expr(expr, bindings, params, embedded?, type \\ nil)
defp do_filter_to_expr(
%BooleanExpression{op: op, left: left, right: right},
bindings,
params,
embedded?,
_type
) do
{params, left_expr} = do_filter_to_expr(left, bindings, params, embedded?)
{params, right_expr} = do_filter_to_expr(right, bindings, params, embedded?)
{params, {op, [], [left_expr, right_expr]}}
end
defp do_filter_to_expr(%Not{expression: expression}, bindings, params, embedded?, _type) do
{params, new_expression} = do_filter_to_expr(expression, bindings, params, embedded?)
{params, {:not, [], [new_expression]}}
end
defp do_filter_to_expr(
%TrigramSimilarity{arguments: [arg1, arg2], embedded?: pred_embedded?},
bindings,
params,
embedded?,
_type
) do
{params, arg1} = do_filter_to_expr(arg1, bindings, params, pred_embedded? || embedded?)
{params, arg2} = do_filter_to_expr(arg2, bindings, params, pred_embedded? || embedded?)
{params, {:fragment, [], [raw: "similarity(", expr: arg1, raw: ", ", expr: arg2, raw: ")"]}}
end
defp do_filter_to_expr(
%Type{arguments: [arg1, arg2], embedded?: pred_embedded?},
bindings,
params,
embedded?,
_type
)
when pred_embedded? or embedded? do
{params, arg1} = do_filter_to_expr(arg1, bindings, params, true)
{params, arg2} = do_filter_to_expr(arg2, bindings, params, true)
case maybe_ecto_type(arg2) do
nil ->
{params, {:type, [], [arg1, arg2]}}
type ->
case arg1 do
%{__predicate__?: _} ->
{params, {:type, [], [arg1, arg2]}}
value ->
{params, %Ecto.Query.Tagged{value: value, type: type}}
end
end
end
defp do_filter_to_expr(
%Type{arguments: [arg1, arg2], embedded?: pred_embedded?},
bindings,
params,
embedded?,
_type
) do
{params, arg1} = do_filter_to_expr(arg1, bindings, params, pred_embedded? || embedded?)
{params, arg2} = do_filter_to_expr(arg2, bindings, params, pred_embedded? || embedded?)
arg2 = maybe_ecto_type(arg2)
{params, {:type, [], [arg1, arg2]}}
end
defp do_filter_to_expr(
%Fragment{arguments: arguments, embedded?: pred_embedded?},
bindings,
params,
embedded?,
_type
) do
{params, fragment_data} =
Enum.reduce(arguments, {params, []}, fn
{:raw, str}, {params, fragment_data} ->
{params, fragment_data ++ [{:raw, str}]}
{:expr, expr}, {params, fragment_data} ->
{params, expr} = do_filter_to_expr(expr, bindings, params, pred_embedded? || embedded?)
{params, fragment_data ++ [{:expr, expr}]}
end)
{params, {:fragment, [], fragment_data}}
end
defp do_filter_to_expr(
%IsNil{left: left, right: right, embedded?: pred_embedded?},
bindings,
params,
embedded?,
_type
) do
{params, left_expr} = do_filter_to_expr(left, bindings, params, pred_embedded? || embedded?)
{params, right_expr} = do_filter_to_expr(right, bindings, params, pred_embedded? || embedded?)
{params,
{:==, [],
[
{:is_nil, [], [left_expr]},
right_expr
]}}
end
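# `ago(n, unit)` compiles to `datetime_add(^now, -n, unit)`, binding the
# current UTC timestamp as a query parameter.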
defp do_filter_to_expr(
%Ago{arguments: [left, right], embedded?: _pred_embedded?},
_bindings,
params,
_embedded?,
_type
)
when is_integer(left) and (is_binary(right) or is_atom(right)) do
{params ++ [{DateTime.utc_now(), {:param, :any_datetime}}],
{:datetime_add, [], [{:^, [], [Enum.count(params)]}, left * -1, to_string(right)]}}
end
defp do_filter_to_expr(
%Contains{arguments: [left, %Ash.CiString{} = right], embedded?: pred_embedded?},
bindings,
params,
embedded?,
type
) do
do_filter_to_expr(
%Fragment{
embedded?: pred_embedded?,
arguments: [
raw: "strpos(",
expr: left,
raw: "::citext, ",
expr: right,
raw: ") > 0"
]
},
bindings,
params,
embedded?,
type
)
end
defp do_filter_to_expr(
%Contains{arguments: [left, right], embedded?: pred_embedded?},
bindings,
params,
embedded?,
type
) do
do_filter_to_expr(
%Fragment{
embedded?: pred_embedded?,
arguments: [
raw: "strpos(",
expr: left,
raw: ", ",
expr: right,
raw: ") > 0"
]
},
bindings,
params,
embedded?,
type
)
end
defp do_filter_to_expr(
%mod{
__predicate__?: _,
left: left,
right: right,
embedded?: pred_embedded?,
operator: op
},
bindings,
params,
embedded?,
_type
) do
[left_type, right_type] = determine_types(mod, [left, right])
{params, left_expr} =
do_filter_to_expr(left, bindings, params, pred_embedded? || embedded?, left_type)
{params, right_expr} =
do_filter_to_expr(right, bindings, params, pred_embedded? || embedded?, right_type)
{params,
{op, [],
[
left_expr,
right_expr
]}}
end
defp do_filter_to_expr(
%Ref{attribute: %{name: name}} = ref,
bindings,
params,
_embedded?,
_type
) do
{params, {{:., [], [{:&, [], [ref_binding(ref, bindings)]}, name]}, [], []}}
end
defp do_filter_to_expr({:embed, other}, _bindings, params, _true, _type) do
{params, other}
end
defp do_filter_to_expr(%Ash.CiString{string: string}, bindings, params, embedded?, type) do
do_filter_to_expr(
%Fragment{
embedded?: embedded?,
arguments: [
raw: "",
expr: string,
raw: "::citext"
]
},
bindings,
params,
embedded?,
type
)
end
defp do_filter_to_expr(%MapSet{} = mapset, bindings, params, embedded?, type) do
do_filter_to_expr(Enum.to_list(mapset), bindings, params, embedded?, type)
end
defp do_filter_to_expr(other, _bindings, params, true, _type) do
{params, other}
end
defp do_filter_to_expr(value, _bindings, params, false, type) do
type = type || :any
value = last_ditch_cast(value, type)
{params ++ [{value, type}], {:^, [], [Enum.count(params)]}}
end
defp maybe_ecto_type({:array, type}), do: {:array, maybe_ecto_type(type)}
defp maybe_ecto_type(type) when is_atom(type) do
if Ash.Type.ash_type?(type) do
Ash.Type.ecto_type(type)
end
end
defp maybe_ecto_type(_type), do: nil
defp last_ditch_cast(value, {:in, type}) when is_list(value) do
Enum.map(value, &last_ditch_cast(&1, type))
end
defp last_ditch_cast(value, _) when is_atom(value) do
to_string(value)
end
defp last_ditch_cast(value, _type) do
value
end
defp determine_types(mod, values) do
mod.types()
|> Enum.map(fn types ->
case types do
:same ->
types =
for _ <- values do
:same
end
closest_fitting_type(types, values)
:any ->
for _ <- values do
:any
end
types ->
closest_fitting_type(types, values)
end
end)
|> Enum.min_by(fn types ->
types
|> Enum.map(&vagueness/1)
|> Enum.sum()
end)
end
defp closest_fitting_type(types, values) do
types_with_values = Enum.zip(types, values)
types_with_values
|> fill_in_known_types()
|> clarify_types()
end
defp clarify_types(types) do
basis =
types
|> Enum.map(&elem(&1, 0))
|> Enum.min_by(&vagueness(&1))
Enum.map(types, fn {type, _value} ->
replace_same(type, basis)
end)
end
defp replace_same({:in, type}, basis) do
{:in, replace_same(type, basis)}
end
defp replace_same(:same, :same) do
:any
end
defp replace_same(:same, {:in, :same}) do
{:in, :any}
end
defp replace_same(:same, basis) do
basis
end
defp replace_same(other, _basis) do
other
end
defp fill_in_known_types(types) do
Enum.map(types, &fill_in_known_type/1)
end
defp fill_in_known_type({vague_type, %Ref{attribute: %{type: type}}} = ref)
when vague_type in [:any, :same] do
if Ash.Type.ash_type?(type) do
{type |> Ash.Type.storage_type() |> array_to_in(), ref}
else
{type |> array_to_in(), ref}
end
end
defp fill_in_known_type(
{{:array, type}, %Ref{attribute: %{type: {:array, type}} = attribute} = ref}
) do
{:in, fill_in_known_type({type, %{ref | attribute: %{attribute | type: type}}})}
end
defp fill_in_known_type({type, value}), do: {array_to_in(type), value}
defp array_to_in({:array, v}), do: {:in, array_to_in(v)}
defp array_to_in(v), do: v
defp vagueness({:in, type}), do: vagueness(type)
defp vagueness(:same), do: 2
defp vagueness(:any), do: 1
defp vagueness(_), do: 0
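# Resolves the query binding index for a reference: attributes match a
# binding whose path equals the ref's relationship path; aggregates match
# the aggregate binding for their relationship path.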
defp ref_binding(ref, bindings) do
case ref.attribute do
%Ash.Resource.Attribute{} ->
Enum.find_value(bindings, fn {binding, data} ->
data.path == ref.relationship_path && data.type in [:inner, :left, :root] && binding
end)
%Ash.Query.Aggregate{} = aggregate ->
Enum.find_value(bindings, fn {binding, data} ->
data.path == aggregate.relationship_path && data.type == :aggregate && binding
end)
end
end
defp add_binding(query, data) do
current = query.__ash_bindings__.current
bindings = query.__ash_bindings__.bindings
new_ash_bindings = %{
query.__ash_bindings__
| bindings: Map.put(bindings, current, data),
current: current + 1
}
%{query | __ash_bindings__: new_ash_bindings}
end
@impl true
def transaction(resource, func) do
repo(resource).transaction(func)
end
@impl true
def rollback(resource, term) do
repo(resource).rollback(term)
end
defp maybe_get_resource_query(resource) do
case Ash.Query.data_layer_query(Ash.Query.new(resource), only_validate_filter?: false) do
{:ok, query} -> query
{:error, error} -> {:error, error}
end
end
defp table(resource, changeset) do
changeset.context[:data_layer][:table] || AshPostgres.table(resource)
end
defp raise_table_error!(resource, operation) do
if AshPostgres.polymorphic?(resource) do
raise """
Could not determine table for #{operation} on #{inspect(resource)}.
Polymorphic resources require that the `data_layer[:table]` context is provided.
See the guide on polymorphic resources for more information.
"""
else
raise """
Could not determine table for #{operation} on #{inspect(resource)}.
"""
end
end
end | lib/data_layer.ex | 0.935942 | 0.52074 | data_layer.ex | starcoder |
defmodule Absinthe.Type.Union do
@moduledoc """
A union is an abstract type made up of multiple possible concrete types.
No common fields are declared in a union. Compare to `Absinthe.Type.Interface`.
Because it's necessary for the union to determine the concrete type of a
resolved object, you must either:
* Provide a `:resolve_type` function on the union
* Provide a `:is_type_of` function on each possible concrete type
```
union :search_result do
description "A search result"
types [:person, :business]
resolve_type fn
%Person{}, _ -> :person
%Business{}, _ -> :business
end
end
```
"""
use Absinthe.Introspection.Kind
alias Absinthe.{Schema, Type}
@typedoc """
* `:name` - The name of the union type. Should be a TitleCased `binary`. Set automatically.
* `:description` - A nice description for introspection.
* `:types` - The list of possible types.
* `:resolve_type` - A function used to determine the concrete type of a resolved object. See also `Absinthe.Type.Object`'s `:is_type_of`. Either `resolve_type` is specified in the union type, or every object type in the union must specify `is_type_of`.
The `:resolve_type` function will be passed two arguments; the object whose type needs to be identified, and the `Absinthe.Execution` struct providing the full execution context.
The `__private__` and `:__reference__` keys are for internal use.
"""
@type t :: %__MODULE__{
name: binary,
description: binary,
types: [Type.identifier_t],
resolve_type: ((any, Absinthe.Resolution.t) -> atom | nil),
identifier: atom,
__private__: Keyword.t,
__reference__: Type.Reference.t,
}
defstruct [
name: nil,
description: nil,
resolve_type: nil,
identifier: nil,
types: [],
__private__: [],
__reference__: nil,
]
def build(%{attrs: attrs}) do
quote do: %unquote(__MODULE__){unquote_splicing(attrs)}
end
@doc false
@spec member?(t, Type.t) :: boolean
def member?(%{types: types}, %{__reference__: %{identifier: ident}}) do
ident in types
end
def member?(_, _) do
false
end
@doc false
@spec resolve_type(t, any, Absinthe.Resolution.t, Keyword.t) :: Type.t | nil
def resolve_type(type, object, env, opts \\ [lookup: true])
def resolve_type(%{resolve_type: nil, types: types}, obj, %{schema: schema}, opts) do
type_name = Enum.find(types, fn
%{is_type_of: nil} ->
false
type ->
case Schema.lookup_type(schema, type) do
nil ->
false
%{is_type_of: nil} ->
false
%{is_type_of: check} ->
check.(obj)
end
end)
if opts[:lookup] do
Schema.lookup_type(schema, type_name)
else
type_name
end
end
def resolve_type(%{resolve_type: resolver}, obj, %{schema: schema} = env, opts) do
case resolver.(obj, env) do
nil ->
nil
ident when is_atom(ident) ->
if opts[:lookup] do
Absinthe.Schema.lookup_type(schema, ident)
else
ident
end
end
end
end | deps/absinthe/lib/absinthe/type/union.ex | 0.858422 | 0.881615 | union.ex | starcoder |
defmodule Slack do
@moduledoc """
Slack is a genserver-ish interface for working with the Slack real time
messaging API through a Websocket connection.
To use this module you'll need a valid Slack API token. You can find your
personal token on the [Slack Web API] page, or you can add a new
[bot integration].
[Slack Web API]: https://api.slack.com/web
[bot integration]: https://api.slack.com/bot-users
## Example
```
defmodule Bot do
use Slack
def handle_message(message = %{type: "message"}, slack) do
if message.text == "Hi" do
send_message("Hello to you too!", message.channel, slack)
end
end
end
Bot.start_link("API_TOKEN")
```
`handle_*` callbacks are always passed a `slack` argument, which holds the
current Slack state and is kept up to date automatically.
In this example we're just matching against the message type and checking if
the text content is "Hi" and if so, we reply with our own greeting.
The message type is pattern matched against because the
[Slack RTM API](https://api.slack.com/rtm) defines many different types of
messages that we can receive. Because of this it's wise to write a catch-all
`handle_message/3` in your bots to prevent crashing.
## Callbacks
* `handle_connect(slack)` - called when connected to Slack.
* `handle_message(message, slack)` - called when a message is received.
* `handle_close(reason, slack)` - called when websocket is closed.
* `handle_info(message, slack)` - called when any other message is received in the process mailbox.
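For example, a minimal `handle_connect/1` (a sketch; `slack.me` holds the
bot's own properties, including its name):
```
def handle_connect(slack) do
  IO.puts "Connected as " <> slack.me.name
end
```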
## Slack argument
The Slack argument that's passed to each callback is what contains all of the
state related to Slack including a list of channels, users, groups, bots, and
even the socket.
Here's a list of what's stored:
* me - The current bot/users information stored as a map of properties.
* team - The current team's information stored as a map of properties.
* bots - Stored as a map with id's as keys.
* channels - Stored as a map with id's as keys.
* groups - Stored as a map with id's as keys.
* users - Stored as a map with id's as keys.
* ims (direct message channels) - Stored as a map with id's as keys.
* socket - The connection to Slack.
* client - The client that makes calls to Slack.
For all but `socket` and `client`, see the [Slack API types] page for the
kinds of data each of these types contains.
[Slack API types]: https://api.slack.com/types
"""
defmacro __using__(_) do
quote do
@behaviour :websocket_client_handler
require Logger
import Slack
import Slack.Lookups
import Slack.Sends
def start_link(token, client \\ :websocket_client) do
case Slack.Rtm.start(token) do
{:ok, rtm} ->
state = %{
rtm: rtm,
client: client,
token: token
}
url = String.to_charlist(rtm.url)
client.start_link(url, __MODULE__, state)
{:error, %HTTPoison.Error{reason: :connect_timeout}} ->
{:error, "Timed out while connecting to the Slack RTM API"}
{:error, %HTTPoison.Error{reason: :nxdomain}} ->
{:error, "Could not connect to the Slack RTM API"}
{:error, error} ->
{:error, error}
end
end
def init(%{rtm: rtm, client: client, token: token}, socket) do
slack = %Slack.State{
socket: socket,
client: client,
token: token,
me: rtm.self,
team: rtm.team,
bots: rtm_list_to_map(rtm.bots),
channels: rtm_list_to_map(rtm.channels),
groups: rtm_list_to_map(rtm.groups),
users: rtm_list_to_map(rtm.users),
ims: rtm_list_to_map(rtm.ims)
}
handle_connect(slack)
{:ok, slack}
end
def websocket_info(:start, _connection, state) do
{:ok, state}
end
def websocket_info(message, _connection, slack) do
try do
handle_info(message, slack)
rescue
e -> handle_exception(e)
end
{:ok, slack}
end
def websocket_terminate(reason, _conn, slack) do
try do
handle_close(reason, slack)
rescue
e -> handle_exception(e)
end
end
def websocket_handle({:ping, data}, _conn, state) do
{:reply, {:pong, data}, state}
end
def websocket_handle({:text, message}, _conn, slack) do
message = prepare_message message
slack = Slack.State.update(message, slack)
slack = if Map.has_key?(message, :type) do
try do
handle_message(message, slack)
slack
rescue
e -> handle_exception(e)
end
else
slack
end
{:ok, slack}
end
defp rtm_list_to_map(list) do
Enum.reduce(list, %{}, fn (item, map) ->
Map.put(map, item.id, item)
end)
end
defp prepare_message(binstring) do
binstring
|> :binary.split(<<0>>)
|> List.first
|> JSX.decode!([{:labels, :atom}])
end
defp handle_exception(e) do
message = Exception.message(e)
Logger.error(message)
System.stacktrace |> Exception.format_stacktrace |> Logger.error
raise message
end
def handle_connect(_slack ), do: :ok
def handle_message(_message, _slack), do: :ok
def handle_close(_reason, _slack), do: :ok
def handle_info(_message, _slack), do: :ok
defoverridable [handle_connect: 1, handle_message: 2, handle_close: 2, handle_info: 2]
end
end
end | lib/slack.ex | 0.824073 | 0.820073 | slack.ex | starcoder |
defmodule Bolt.Sips.Metadata do
@moduledoc false
defstruct [:bookmarks, :tx_timeout, :metadata]
@type t :: %__MODULE__{
bookmarks: [String.t()],
tx_timeout: non_neg_integer(),
metadata: map()
}
alias Bolt.Sips.Metadata
@doc """
Create a new metadata structure.
Data must be valid.
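## Example
A minimal sketch (only `tx_timeout` is set; the other fields default to `nil`):
    Bolt.Sips.Metadata.new(%{tx_timeout: 5_000})
    #=> {:ok, %Bolt.Sips.Metadata{bookmarks: nil, metadata: nil, tx_timeout: 5000}}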
"""
@spec new(map()) :: {:ok, Bolt.Sips.Metadata.t()} | {:error, String.t()}
def new(data) do
with {:ok, data} <- check_keys(data),
{:ok, bookmarks} <- validate_bookmarks(Map.get(data, :bookmarks, [])),
{:ok, tx_timeout} <- validate_timeout(Map.get(data, :tx_timeout)),
{:ok, metadata} <- validate_metadata(Map.get(data, :metadata, %{})) do
{:ok,
%__MODULE__{
bookmarks: bookmarks,
tx_timeout: tx_timeout,
metadata: metadata
}}
else
error -> error
end
end
@doc """
Convert the Metadata struct to a map.
All `nil` values will be stripped.
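## Example
A minimal sketch, continuing from `new/1`:
    {:ok, metadata} = Bolt.Sips.Metadata.new(%{tx_timeout: 5_000})
    Bolt.Sips.Metadata.to_map(metadata)
    #=> %{tx_timeout: 5000}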
"""
@spec to_map(Bolt.Sips.Metadata.t()) :: map()
def to_map(metadata) do
with {:ok, metadata} <- check_keys(Map.from_struct(metadata)) do
metadata
|> Map.from_struct()
|> Enum.filter(fn {_, value} -> value != nil end)
|> Enum.into(%{})
else
error -> error
end
end
defp check_keys(data) do
try do
{:ok, struct!(Metadata, data)}
rescue
_ in KeyError -> {:error, "[Metadata] Invalid keys"}
end
end
@spec validate_bookmarks(any()) :: {:ok, list()} | {:ok, nil} | {:error, String.t()}
defp validate_bookmarks(bookmarks)
when (is_list(bookmarks) and length(bookmarks) > 0) or is_nil(bookmarks) do
{:ok, bookmarks}
end
defp validate_bookmarks([]) do
{:ok, nil}
end
defp validate_bookmarks(_) do
{:error, "[Metadata] Invalid bookmkarks. Should be a list."}
end
@spec validate_timeout(any()) :: {:ok, integer()} | {:error, String.t()}
defp validate_timeout(timeout) when (is_integer(timeout) and timeout > 0) or is_nil(timeout) do
{:ok, timeout}
end
defp validate_timeout(nil) do
{:ok, nil}
end
defp validate_timeout(_) do
{:error, "[Metadata] Invalid timeout. Should be a positive integer."}
end
@spec validate_metadata(any()) :: {:ok, map()} | {:ok, nil} | {:error, String.t()}
defp validate_metadata(metadata)
when (is_map(metadata) and map_size(metadata) > 0) or is_nil(metadata) do
{:ok, metadata}
end
defp validate_metadata(%{}) do
{:ok, nil}
end
defp validate_metadata(_) do
{:error, "[Metadata] Invalid timeout. Should be a valid map or nil."}
end
end | lib/bolt_sips/metadata.ex | 0.803829 | 0.492371 | metadata.ex | starcoder |
defmodule Elastix.Bulk do
@moduledoc """
The bulk API makes it possible to perform many index/delete operations in a single API call.
[Elastic documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html)
"""
import Elastix.HTTP, only: [prepare_url: 2]
alias Elastix.{HTTP, JSON}
@doc """
Expects a list of actions and sources for the `lines` parameter.
## Examples
iex> Elastix.Bulk.post("http://localhost:9200", [%{index: %{_id: "1"}}, %{user: "kimchy"}], index: "twitter", type: "tweet")
{:ok, %HTTPoison.Response{...}}
"""
@spec post(
elastic_url :: String.t(),
lines :: list,
opts :: Keyword.t(),
query_params :: Keyword.t()
) :: HTTP.resp()
def post(elastic_url, lines, options \\ [], query_params \\ []) do
data =
Enum.reduce(lines, [], fn l, acc -> ["\n", JSON.encode!(l) | acc] end)
|> Enum.reverse()
|> IO.iodata_to_binary()
path =
Keyword.get(options, :index)
|> make_path(Keyword.get(options, :type), query_params)
httpoison_options = Keyword.get(options, :httpoison_options, [])
elastic_url
|> prepare_url(path)
|> HTTP.put(data, [], httpoison_options)
end
@doc """
Deprecated: use `post/4` instead.
"""
@spec post_to_iolist(
elastic_url :: String.t(),
lines :: list,
opts :: Keyword.t(),
query_params :: Keyword.t()
) :: HTTP.resp()
def post_to_iolist(elastic_url, lines, options \\ [], query_params \\ []) do
IO.warn(
"This function is deprecated and will be removed in future releases; use Elastix.Bulk.post/4 instead."
)
httpoison_options = Keyword.get(options, :httpoison_options, [])
(elastic_url <>
make_path(Keyword.get(options, :index), Keyword.get(options, :type), query_params))
|> HTTP.put(Enum.map(lines, fn line -> JSON.encode!(line) <> "\n" end), [], httpoison_options)
end
@doc """
Same as `post/4` but instead of sending a list of maps you must send raw binary data in
the format described in the [Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html).
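## Examples
A sketch mirroring `post/4` (note that each action/source line must be
newline-terminated):
    iex> raw = ~s({"index": {"_id": "1"}}\n{"user": "kimchy"}\n)
    iex> Elastix.Bulk.post_raw("http://localhost:9200", raw, index: "twitter", type: "tweet")
    {:ok, %HTTPoison.Response{...}}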
"""
@spec post_raw(
elastic_url :: String.t(),
raw_data :: String.t(),
opts :: Keyword.t(),
query_params :: Keyword.t()
) :: HTTP.resp()
def post_raw(elastic_url, raw_data, options \\ [], query_params \\ []) do
httpoison_options = Keyword.get(options, :httpoison_options, [])
(elastic_url <>
make_path(Keyword.get(options, :index), Keyword.get(options, :type), query_params))
|> HTTP.put(raw_data, [], httpoison_options)
end
@doc false
def make_path(index_name, type_name, query_params) do
path = make_base_path(index_name, type_name)
case query_params do
[] -> path
_ -> HTTP.append_query_string(path, query_params)
end
end
defp make_base_path(nil, nil), do: "/_bulk"
defp make_base_path(index_name, nil), do: "/#{index_name}/_bulk"
defp make_base_path(index_name, type_name), do: "/#{index_name}/#{type_name}/_bulk"
end | lib/elastix/bulk.ex | 0.839718 | 0.472318 | bulk.ex | starcoder |
defmodule AWS.Synthetics do
@moduledoc """
Amazon CloudWatch Synthetics
You can use Amazon CloudWatch Synthetics to continually monitor your
services. You can create and manage *canaries*, which are modular,
lightweight scripts that monitor your endpoints and APIs from the
outside-in. You can set up your canaries to run 24 hours a day, once per
minute. The canaries help you check the availability and latency of your
web services and troubleshoot anomalies by investigating load time data,
screenshots of the UI, logs, and metrics. The canaries seamlessly integrate
with CloudWatch ServiceLens to help you trace the causes of impacted nodes
in your applications. For more information, see [Using ServiceLens to
Monitor the Health of Your
Applications](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ServiceLens.html)
in the *Amazon CloudWatch User Guide*.
Before you create and manage canaries, be aware of the security
considerations. For more information, see [Security Considerations for
Synthetics
Canaries](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/servicelens_canaries_security.html).
"""
@doc """
Creates a canary. Canaries are scripts that monitor your endpoints and APIs
from the outside-in. Canaries help you check the availability and latency
of your web services and troubleshoot anomalies by investigating load time
data, screenshots of the UI, logs, and metrics. You can set up a canary to
run continuously or just once.
Do not use `CreateCanary` to modify an existing canary. Use
[UpdateCanary](https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_UpdateCanary.html)
instead.
To create canaries, you must have the `CloudWatchSyntheticsFullAccess`
policy. If you are creating a new IAM role for the canary, you also need
the the `iam:CreateRole`, `iam:CreatePolicy` and `iam:AttachRolePolicy`
permissions. For more information, see [Necessary Roles and
Permissions](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_Roles).
Do not include secrets or proprietary information in your canary names. The
canary name makes up part of the Amazon Resource Name (ARN) for the canary,
and the ARN is included in outbound calls over the internet. For more
information, see [Security Considerations for Synthetics
Canaries](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/servicelens_canaries_security.html).
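## Example
A sketch of a call (the field names follow the CreateCanary request shape;
consult the AWS API reference for the full schema):
    input = %{
      "Name" => "my-canary",
      "ArtifactS3Location" => "s3://my-bucket/canary-artifacts",
      "ExecutionRoleArn" => "arn:aws:iam::123456789012:role/my-canary-role",
      "RuntimeVersion" => "syn-nodejs-puppeteer-3.1",
      "Schedule" => %{"Expression" => "rate(5 minutes)"},
      "Code" => %{"Handler" => "index.handler", "S3Bucket" => "my-bucket", "S3Key" => "canary.zip"}
    }
    AWS.Synthetics.create_canary(client, input)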
"""
def create_canary(client, input, options \\ []) do
path_ = "/canary"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Permanently deletes the specified canary.
When you delete a canary, resources used and created by the canary are not
automatically deleted. After you delete a canary that you do not intend to
use again, you should also delete the following:
<ul> <li> The Lambda functions and layers used by this canary. These have
the prefix `cwsyn-*MyCanaryName* `.
</li> <li> The CloudWatch alarms created for this canary. These alarms have
a name of `Synthetics-SharpDrop-Alarm-*MyCanaryName* `.
</li> <li> Amazon S3 objects and buckets, such as the canary's artifact
location.
</li> <li> IAM roles created for the canary. If they were created in the
console, these roles have the name `
role/service-role/CloudWatchSyntheticsRole-*MyCanaryName* `.
</li> <li> CloudWatch Logs log groups created for the canary. These logs
groups have the name `/aws/lambda/cwsyn-*MyCanaryName* `.
</li> </ul> Before you delete a canary, you might want to use `GetCanary`
to display the information about this canary. Make note of the information
returned by this operation so that you can delete these resources after you
delete the canary.
"""
def delete_canary(client, name, input, options \\ []) do
path_ = "/canary/#{URI.encode(name)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
This operation returns a list of the canaries in your account, along with
full details about each canary.
This operation does not have resource-level authorization, so if a user is
able to use `DescribeCanaries`, the user can see all of the canaries in the
account. A deny policy can only be used to restrict access to all canaries.
It cannot be used on specific resources.
"""
def describe_canaries(client, input, options \\ []) do
path_ = "/canaries"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Use this operation to see information from the most recent run of each
canary that you have created.
"""
def describe_canaries_last_run(client, input, options \\ []) do
path_ = "/canaries/last-run"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Returns a list of Synthetics canary runtime versions. For more information,
see [ Canary Runtime
Versions](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_Library.html).
"""
def describe_runtime_versions(client, input, options \\ []) do
path_ = "/runtime-versions"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Retrieves complete information about one canary. You must specify the name
of the canary that you want. To get a list of canaries and their names, use
[DescribeCanaries](https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_DescribeCanaries.html).
"""
def get_canary(client, name, options \\ []) do
path_ = "/canary/#{URI.encode(name)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Retrieves a list of runs for a specified canary.
"""
def get_canary_runs(client, name, input, options \\ []) do
path_ = "/canary/#{URI.encode(name)}/runs"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Displays the tags associated with a canary.
"""
def list_tags_for_resource(client, resource_arn, options \\ []) do
path_ = "/tags/#{URI.encode(resource_arn)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Use this operation to run a canary that has already been created. The
frequency of the canary runs is determined by the value of the canary's
`Schedule`. To see a canary's schedule, use
[GetCanary](https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_GetCanary.html).
"""
def start_canary(client, name, input, options \\ []) do
path_ = "/canary/#{URI.encode(name)}/start"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Stops the canary to prevent all future runs. If the canary is currently
running, Synthetics stops waiting for the current run of the specified
canary to complete. The run that is in progress completes on its own,
publishes metrics, and uploads artifacts, but it is not recorded in
Synthetics as a completed run.
You can use `StartCanary` to start it running again with the canary’s
current schedule at any point in the future.
"""
def stop_canary(client, name, input, options \\ []) do
path_ = "/canary/#{URI.encode(name)}/stop"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Assigns one or more tags (key-value pairs) to the specified canary.
Tags can help you organize and categorize your resources. You can also use
them to scope user permissions, by granting a user permission to access or
change only resources with certain tag values.
Tags don't have any semantic meaning to AWS and are interpreted strictly as
strings of characters.
You can use the `TagResource` action with a canary that already has tags.
If you specify a new tag key for the alarm, this tag is appended to the
list of tags associated with the alarm. If you specify a tag key that is
already associated with the alarm, the new tag value that you specify
replaces the previous value for that tag.
You can associate as many as 50 tags with a canary.
"""
def tag_resource(client, resource_arn, input, options \\ []) do
path_ = "/tags/#{URI.encode(resource_arn)}"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Removes one or more tags from the specified canary.
"""
def untag_resource(client, resource_arn, input, options \\ []) do
path_ = "/tags/#{URI.encode(resource_arn)}"
headers = []
{query_, input} =
[
{"TagKeys", "tagKeys"},
]
|> AWS.Request.build_params(input)
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Use this operation to change the settings of a canary that has already been
created.
You can't use this operation to update the tags of an existing canary. To
change the tags of an existing canary, use
[TagResource](https://docs.aws.amazon.com/AmazonSynthetics/latest/APIReference/API_TagResource.html).
"""
def update_canary(client, name, input, options \\ []) do
path_ = "/canary/#{URI.encode(name)}"
headers = []
query_ = []
request(client, :patch, path_, query_, headers, input, options, nil)
end
@spec request(AWS.Client.t(), binary(), binary(), list(), list(), map(), list(), pos_integer()) ::
{:ok, Poison.Parser.t(), Poison.Response.t()}
| {:error, Poison.Parser.t()}
| {:error, HTTPoison.Error.t()}
defp request(client, method, path, query, headers, input, options, success_status_code) do
client = %{client | service: "synthetics"}
host = build_host("synthetics", client)
url = host
|> build_url(path, client)
|> add_query(query)
additional_headers = [{"Host", host}, {"Content-Type", "application/x-amz-json-1.1"}]
headers = AWS.Request.add_headers(additional_headers, headers)
payload = encode_payload(input)
headers = AWS.Request.sign_v4(client, method, url, headers, payload)
perform_request(method, url, payload, headers, options, success_status_code)
end
defp perform_request(method, url, payload, headers, options, nil) do
case HTTPoison.request(method, url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, response}
{:ok, %HTTPoison.Response{status_code: status_code, body: body} = response}
when status_code == 200 or status_code == 202 or status_code == 204 ->
{:ok, Poison.Parser.parse!(body, %{}), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body, %{})
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp perform_request(method, url, payload, headers, options, success_status_code) do
case HTTPoison.request(method, url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: ^success_status_code, body: ""} = response} ->
{:ok, %{}, response}
{:ok, %HTTPoison.Response{status_code: ^success_status_code, body: body} = response} ->
{:ok, Poison.Parser.parse!(body, %{}), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body, %{})
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, path, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}#{path}"
end
defp add_query(url, []) do
url
end
defp add_query(url, query) do
querystring = AWS.Util.encode_query(query)
"#{url}?#{querystring}"
end
defp encode_payload(input) do
if input != nil, do: Poison.Encoder.encode(input, %{}), else: ""
end
end | lib/aws/synthetics.ex | 0.809953 | 0.595875 | synthetics.ex | starcoder |
defmodule Yyzzy.Enum do
### Protocols and Behaviors: Enum
## Note: this implementation might have to change if we want fully qualified uids.
defimpl Enumerable, for: Yyzzy do
def count(yyzzy) do
{:ok, Enum.reduce(yyzzy,0,fn _x, acc -> acc + 1 end) }
end
@doc """
entities are in a tree-like form, so membership is checked via DFS
"""
def member?(_, nil), do: {:ok, false}
def member?(%Yyzzy{uid: uid}, value) when is_atom(value) and uid == value, do: {:ok, true}
def member?(e, value) when is_atom(value) do
{:ok, Enum.reduce_while(e, false, fn x, _acc ->
case x.uid == value do
true -> {:halt, true}
false -> {:cont, false}
end
end)}
end
def member?(yyzzy, %Yyzzy{uid: value}), do: member?(yyzzy, value)
def member?(_,_), do: {:error, __MODULE__}
@doc """
reduce is done via DFS and has three cases:
Root node, many children
level of nodes which may have children
level of only leafs
"""
def reduce(_, {:halt, acc}, _fun), do: {:halted, acc}
def reduce(yyzzy, {:suspend, acc}, fun), do: {:suspended, acc, &reduce(yyzzy, &1, fun)}
def reduce(y = %Yyzzy{entities: es}, {:cont, acc}, fun) when map_size(es) == 0 do
{:cont, acc} = fun.(y, acc)
{:done, acc}
end
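# A child may be stored as an accessor closure (a process-backed entity); in
# that case the remaining siblings are merged into it via an :update message
# before descending, instead of updating the struct directly.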
def reduce(y = %Yyzzy{entities: es}, {:cont, acc}, fun) do
new_acc = fun.(y,acc)
[h | rest] = Map.values(es)
rest = for child <- rest, into: %{} do
case child.uid do
{_heirarchy, uid} -> {uid, child}
uid -> {uid, child}
end
end
new_y = case h do
f when is_function(f) ->
f.({:update, fn x -> %Yyzzy{x | entities: Map.merge(x.entities, rest)} end })
f
h -> %Yyzzy{h | entities: Map.merge(h.entities, rest)}
end
reduce(new_y, new_acc, fun)
end
def reduce(y, {:cont, acc}, fun) when is_function(y) do
root = y.(:get)
{:cont, new_acc} = fun.(root, acc)
case Enum.count(root.entities) do
0 -> {:done, new_acc}
n ->
[h | _] = Map.keys(root.entities)
reduce(Yyzzy.retree(root, h), new_acc, fun)
end
end
end
end | lib/yyzzy/enum.ex | 0.610453 | 0.405154 | enum.ex | starcoder |
defmodule ElixirBoilerplateWeb.Errors do
alias Ecto.Changeset
alias ElixirBoilerplateWeb.Errors.View
@doc """
Generates a human-readable block containing all errors in a changeset. Errors
are then localized using translations in the `ecto` domain.
For example, you could have an `ecto.po` file in the french locale:
```
msgid ""
msgstr ""
"Language: fr"
msgid "can't be blank"
msgstr "ne peut être vide"
```
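Usage is a one-liner, for example (a sketch; `Repo` and `User` are
hypothetical):
    {:error, changeset} = Repo.insert(User.changeset(%User{}, %{}))
    ElixirBoilerplateWeb.Errors.error_messages(changeset)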
"""
def error_messages(changeset) do
changeset
|> Changeset.traverse_errors(&translate_error/1)
|> convert_errors_to_html(changeset.data.__struct__)
end
defp translate_error({message, options}) do
if options[:count] do
Gettext.dngettext(ElixirBoilerplate.Gettext, "ecto", message, message, options[:count], options)
else
Gettext.dgettext(ElixirBoilerplate.Gettext, "ecto", message, options)
end
end
defp convert_errors_to_html(errors, schema) do
errors = Enum.reduce(errors, [], &convert_error_field(&1, &2, schema))
View.render("error_messages.html", %{errors: errors})
end
defp convert_error_field({field, errors}, memo, schema) when is_list(errors), do: memo ++ Enum.flat_map(errors, &convert_error_subfield(&1, field, [], schema))
defp convert_error_field({field, errors}, memo, schema) when is_map(errors), do: memo ++ Enum.flat_map(Map.keys(errors), &convert_error_subfield(&1, field, errors[&1], schema))
defp convert_error_subfield(message, field, _, _schema) when is_binary(message) do
# NOTE `schema` is available here if we want to use something like
# `schema.humanize_field(field)` to be able to display `"Email address is
# invalid"` instead of `email is invalid"`.
["#{field} #{message}"]
end
defp convert_error_subfield(message, field, memo, schema) when is_map(message) do
Enum.reduce(message, memo, fn {subfield, errors}, memo ->
memo ++ convert_error_field({"#{field}.#{subfield}", errors}, memo, schema)
end)
end
defp convert_error_subfield(subfield, field, errors, schema) do
field = "#{field}.#{subfield}"
convert_error_field({field, errors}, [], schema)
end
end | lib/elixir_boilerplate_web/errors/errors.ex | 0.819424 | 0.622631 | errors.ex | starcoder |
defmodule Flop do
@moduledoc """
Flop is a helper library for filtering, ordering and pagination with Ecto.
## Usage
The simplest way of using this library is just to use
`Flop.validate_and_run/3` and `Flop.validate_and_run!/3`. Both functions
take a queryable and a parameter map, validate the parameters, run the query
and return the query results and the meta information.
iex> Flop.Repo.insert_all(Flop.Pet, [
...> %{name: "Harry", age: 4, species: "C. lupus"},
...> %{name: "Maggie", age: 1, species: "O. cuniculus"},
...> %{name: "Patty", age: 2, species: "C. aegagrus"}
...> ])
iex> params = %{order_by: ["name", "age"], page: 1, page_size: 2}
iex> {:ok, {results, meta}} =
...> Flop.validate_and_run(
...> Flop.Pet,
...> params,
...> repo: Flop.Repo
...> )
iex> Enum.map(results, & &1.name)
["Harry", "Maggie"]
iex> meta.total_count
3
iex> meta.total_pages
2
iex> meta.has_next_page?
true
Under the hood, these functions just call `Flop.validate/2` and `Flop.run/3`,
which in turn calls `Flop.all/3` and `Flop.meta/3`. If you need finer control
over if and when to execute each step, you can call those functions directly.
See `Flop.Meta` for descriptions of the meta fields.
## Global configuration
You can set some global options like the default Ecto repo via the application
environment. All global options can be overridden by passing them directly to
the functions or configuring the options for a schema module via
`Flop.Schema`.
import Config
config :flop, repo: MyApp.Repo
See `t:Flop.option/0` for a description of all available options.
## Schema options
You can set some options for a schema by deriving `Flop.Schema`. The options
are evaluated at the validation step.
defmodule Pet do
use Ecto.Schema
@derive {Flop.Schema,
filterable: [:name, :species],
sortable: [:name, :age],
default_limit: 20,
max_limit: 100}
schema "pets" do
field :name, :string
field :age, :integer
field :species, :string
field :social_security_number, :string
end
end
You need to pass the schema to `Flop.validate/2` or any function that
includes the validation step with the `:for` option.
iex> params = %{"order_by" => ["name", "age"], "limit" => 5}
iex> {:ok, flop} = Flop.validate(params, for: Flop.Pet)
iex> flop.limit
5
iex> params = %{"order_by" => ["name", "age"], "limit" => 10_000}
iex> {:error, meta} = Flop.validate(params, for: Flop.Pet)
iex> [limit: [{msg, _}]] = meta.errors
iex> msg
"must be less than or equal to %{number}"
iex> params = %{"order_by" => ["name", "age"], "limit" => 10_000}
iex> {:error, %Flop.Meta{} = meta} =
...> Flop.validate_and_run(
...> Flop.Pet,
...> params,
...> for: Flop.Pet
...> )
iex> [limit: [{msg, _}]] = meta.errors
iex> msg
"must be less than or equal to %{number}"
## Ordering
To add an ordering clause to a query, you need to set the `:order_by` and
optionally the `:order_directions` parameter. `:order_by` should be the list
of fields, while `:order_directions` is a list of `t:Flop.order_direction/0`.
`:order_by` and `:order_directions` are zipped when generating the `ORDER BY`
clause. If no order directions are given, `:asc` is used as default.
iex> params = %{
...> "order_by" => ["name", "age"],
...> "order_directions" => ["asc", "desc"]
...> }
iex> {:ok, flop} = Flop.validate(params)
iex> flop.order_by
[:name, :age]
iex> flop.order_directions
[:asc, :desc]
Flop uses these two fields instead of a keyword list, so that the order
instructions can be easily passed in a query string.
## Pagination
For queries using `OFFSET` and `LIMIT`, you have the choice between
page-based pagination parameters:
%{page: 5, page_size: 20}
and offset-based pagination parameters:
%{offset: 100, limit: 20}
For cursor-based pagination, you can either use `:first`/`:after` or
`:last`/`:before`. You also need to pass the `:order_by` parameter or set a
default order for the schema via `Flop.Schema`.
iex> Flop.Repo.insert_all(Flop.Pet, [
...> %{name: "Harry", age: 4, species: "C. lupus"},
...> %{name: "Maggie", age: 1, species: "O. cuniculus"},
...> %{name: "Patty", age: 2, species: "C. aegagrus"}
...> ])
iex>
iex> # forward (first/after)
iex>
iex> params = %{first: 2, order_by: [:species, :name]}
iex> {:ok, {results, meta}} = Flop.validate_and_run(Flop.Pet, params)
iex> Enum.map(results, & &1.name)
["Patty", "Harry"]
iex> meta.has_next_page?
true
iex> end_cursor = meta.end_cursor
"g3QAAAACZAAEbmFtZW0AAAAFSGFycnlkAAdzcGVjaWVzbQAAAAhDLiBsdXB1cw=="
iex> params = %{first: 2, after: end_cursor, order_by: [:species, :name]}
iex> {:ok, {results, meta}} = Flop.validate_and_run(Flop.Pet, params)
iex> Enum.map(results, & &1.name)
["Maggie"]
iex> meta.has_next_page?
false
iex>
iex> # backward (last/before)
iex>
iex> params = %{last: 2, order_by: [:species, :name]}
iex> {:ok, {results, meta}} = Flop.validate_and_run(Flop.Pet, params)
iex> Enum.map(results, & &1.name)
["Harry", "Maggie"]
iex> meta.has_previous_page?
true
iex> start_cursor = meta.start_cursor
"g3QAAAACZAAEbmFtZW0AAAAFSGFycnlkAAdzcGVjaWVzbQAAAAhDLiBsdXB1cw=="
iex> params = %{last: 2, before: start_cursor, order_by: [:species, :name]}
iex> {:ok, {results, meta}} = Flop.validate_and_run(Flop.Pet, params)
iex> Enum.map(results, & &1.name)
["Patty"]
iex> meta.has_previous_page?
false
By default, it is assumed that the query result is a list of maps or structs.
If your query returns a different data structure, you can pass the
`:cursor_value_func` option to retrieve the cursor values. See
`t:Flop.option/0` and `Flop.Cursor` for more information.
You can restrict which pagination types are available. See `t:Flop.option/0`
for details.
## Filters
Filters can be passed as a list of maps. It is recommended to define the
filterable fields for a schema using `Flop.Schema`.
iex> Flop.Repo.insert_all(Flop.Pet, [
...> %{name: "Harry", age: 4, species: "C. lupus"},
...> %{name: "Maggie", age: 1, species: "O. cuniculus"},
...> %{name: "Patty", age: 2, species: "C. aegagrus"}
...> ])
iex>
iex> params = %{filters: [%{field: :name, op: :=~, value: "Mag"}]}
iex> {:ok, {results, meta}} = Flop.validate_and_run(Flop.Pet, params)
iex> meta.total_count
1
iex> [pet] = results
iex> pet.name
"Maggie"
See `t:Flop.Filter.op/0` for a list of all available filter operators.
## GraphQL and Relay
The parameters used for cursor-based pagination follow the Relay
specification, so you can just pass the arguments you get from the client on
to Flop.
`Flop.Relay` can convert the query results returned by
`Flop.validate_and_run/3` into `Edges` and `PageInfo` formats required for
Relay connections.
For example, if you have a context module like this:
defmodule MyApp.Flora do
  import Ecto.Query, warn: false
alias MyApp.Flora.Plant
def list_plants_by_continent(%Continent{} = continent, %{} = args) do
Plant
|> where(continent_id: ^continent.id)
|> Flop.validate_and_run(args, for: Plant)
end
end
Then your Absinthe resolver for the `plants` connection may look something
like this:
def list_plants(args, %{source: %Continent{} = continent}) do
with {:ok, result} <-
Flora.list_plants_by_continent(continent, args) do
{:ok, Flop.Relay.connection_from_result(result)}
end
end
"""
use Ecto.Schema
import Ecto.Changeset
alias Ecto.Changeset
alias Ecto.Query
alias Ecto.Queryable
alias Flop.Builder
alias Flop.Cursor
alias Flop.CustomTypes.ExistingAtom
alias Flop.Filter
alias Flop.Meta
require Ecto.Query
require Logger
@typedoc """
Options that can be passed to most of the functions or that can be set via
the application environment.
- `:cursor_value_func` - 2-arity function used to get the (unencoded)
cursor value from a record. Only used with cursor-based pagination. The
first argument is the record, the second argument is the list of fields used
in the `ORDER BY` clause. Needs to return a map with the order fields as
keys and the record values of these fields as values. Defaults to
`Flop.Cursor.get_cursor_from_node/2`.
- `:default_limit` - Sets a global default limit for queries that is used if
no default limit is set for a schema and no limit is set in the parameters.
Can only be set in the application configuration.
- `:default_pagination_type` - The pagination type to use when setting default
parameters and the pagination type cannot be determined from the parameters.
Parameters for other pagination types can still be passed when setting this
option. To restrict which pagination types can be used, set the
`:pagination_types` option.
- `:filtering` (boolean) - Can be set to `false` to silently ignore filter
parameters.
- `:for` - The schema module to be used for validation. `Flop.Schema` must be
derived for the given module. This option is optional and can not be set
globally. If it is not set, schema specific validation will be omitted. Used
by the validation functions. It is also used to determine which fields are
join and compound fields.
- `:max_limit` - Sets a global maximum limit for queries that is used if no
maximum limit is set for a schema. Can only be set in the application
configuration.
- `:pagination` (boolean) - Can be set to `false` to silently ignore
pagination parameters.
- `:pagination_types` - Defines which pagination types are allowed. Parameters
for other pagination types will not be cast. By default, all pagination
types are allowed. See also `t:Flop.pagination_type/0`.
- `:prefix` - Configures the query to be executed with the given query prefix.
See the Ecto documentation on
["Query prefix"](https://hexdocs.pm/ecto/Ecto.Query.html#module-query-prefix).
- `:ordering` (boolean) - Can be set to `false` to silently ignore order
parameters. Default orders are still applied.
- `:repo` - The Ecto Repo module to use for the database query. Used by all
functions that execute a database query.
All options can be passed directly to the functions. Some of the options can
be set on a schema level via `Flop.Schema`.
All options except `:for` can be set globally via the application environment.
import Config
config :flop,
default_limit: 25,
filtering: false,
cursor_value_func: &MyApp.Repo.get_cursor_value/2,
max_limit: 100,
ordering: false,
pagination_types: [:first, :last, :page],
repo: MyApp.Repo,
prefix: "some-prefix"
The look up order is:
1. option passed to function
2. option set for schema using `Flop.Schema` (only `:max_limit`,
`:default_limit` and `:pagination_types`)
3. option set in global config (except `:for`)
4. default value (only `:cursor_value_func`)
"""
@type option ::
{:cursor_value_func, (any, [atom] -> map)}
| {:default_limit, pos_integer}
| {:default_pagination_type, pagination_type()}
| {:filtering, boolean}
| {:for, module}
| {:max_limit, pos_integer}
| {:ordering, boolean}
| {:pagination, boolean}
| {:pagination_types, [pagination_type()]}
| {:prefix, binary}
| {:repo, module}
@typedoc """
Represents the supported order direction values.
"""
@type order_direction ::
:asc
| :asc_nulls_first
| :asc_nulls_last
| :desc
| :desc_nulls_first
| :desc_nulls_last
@typedoc """
Represents the pagination type.
- `:offset` - pagination using the `offset` and `limit` parameters
- `:page` - pagination using the `page` and `page_size` parameters
- `:first` - cursor-based pagination using the `first` and `after` parameters
- `:last` - cursor-based pagination using the `last` and `before` parameters
"""
@type pagination_type :: :offset | :page | :first | :last
@typedoc """
Represents the query parameters for filtering, ordering and pagination.
### Fields
- `after`: Used for cursor-based pagination. Must be used with `first` or a
default limit.
- `before`: Used for cursor-based pagination. Must be used with `last` or a
default limit.
- `limit`, `offset`: Used for offset-based pagination.
- `first`: Used for cursor-based pagination. Can be used alone to begin
  pagination or with `after`.
- `last`: Used for cursor-based pagination.
- `page`, `page_size`: Used for offset-based pagination as an alternative to
`offset` and `limit`.
- `order_by`: List of fields to order by. Fields can be restricted by
deriving `Flop.Schema` in your Ecto schema.
- `order_directions`: List of order directions applied to the fields defined
in `order_by`. If empty or the list is shorter than the `order_by` list,
`:asc` will be used as a default for each missing order direction.
- `filters`: List of filters, see `t:Flop.Filter.t/0`.
"""
@type t :: %__MODULE__{
after: String.t() | nil,
before: String.t() | nil,
filters: [Filter.t()] | nil,
first: pos_integer | nil,
last: pos_integer | nil,
limit: pos_integer | nil,
offset: non_neg_integer | nil,
order_by: [atom | String.t()] | nil,
order_directions: [order_direction()] | nil,
page: pos_integer | nil,
page_size: pos_integer | nil
}
@primary_key false
embedded_schema do
field :after, :string
field :before, :string
field :first, :integer
field :last, :integer
field :limit, :integer
field :offset, :integer
field :order_by, {:array, ExistingAtom}
field :order_directions, {:array, Ecto.Enum},
values: [
:asc,
:asc_nulls_first,
:asc_nulls_last,
:desc,
:desc_nulls_first,
:desc_nulls_last
]
field :page, :integer
field :page_size, :integer
embeds_many :filters, Filter
end
@doc """
Adds clauses for filtering, ordering and pagination to a
`t:Ecto.Queryable.t/0`.
The parameters are represented by the `t:Flop.t/0` type. Any `nil` values
will be ignored.
## Examples
iex> flop = %Flop{limit: 10, offset: 19}
iex> Flop.query(Flop.Pet, flop)
#Ecto.Query<from p0 in Flop.Pet, limit: ^10, offset: ^19>
Or enhance an already defined query:
iex> require Ecto.Query
iex> flop = %Flop{limit: 10}
iex> Flop.Pet |> Ecto.Query.where(species: "dog") |> Flop.query(flop)
#Ecto.Query<from p0 in Flop.Pet, where: p0.species == \"dog\", limit: ^10>
Note that when using cursor-based pagination, the applied limit will be
`first + 1` or `last + 1`. The extra record is removed by `Flop.run/3`.
"""
@spec query(Queryable.t(), Flop.t(), [option()]) :: Queryable.t()
def query(q, %Flop{} = flop, opts \\ []) do
q
|> filter(flop, opts)
|> order_by(flop, opts)
|> paginate(flop, opts)
end
@doc """
Applies the given Flop to the given queryable and returns all matching
entries.
iex> Flop.all(Flop.Pet, %Flop{}, repo: Flop.Repo)
[]
You can also configure a default repo in your config files:
config :flop, repo: MyApp.Repo
This allows you to omit the third argument:
iex> Flop.all(Flop.Pet, %Flop{})
[]
Note that when using cursor-based pagination, the applied limit will be
`first + 1` or `last + 1`. The extra record is removed by `Flop.run/3`, but
not by this function.
"""
@doc since: "0.6.0"
@spec all(Queryable.t(), Flop.t(), [option()]) :: [any]
def all(q, %Flop{} = flop, opts \\ []) do
apply_on_repo(:all, "all", [query(q, flop, opts)], opts)
end
@doc """
Applies the given Flop to the given queryable, retrieves the data and the
meta data.
This function does not validate the given flop parameters. You can validate
the parameters with `Flop.validate/2` or `Flop.validate!/2`, or you can use
`Flop.validate_and_run/3` or `Flop.validate_and_run!/3` instead of this
function.
iex> {data, meta} = Flop.run(Flop.Pet, %Flop{})
iex> data == []
true
iex> match?(%Flop.Meta{}, meta)
true
"""
@doc since: "0.6.0"
@spec run(Queryable.t(), Flop.t(), [option()]) :: {[any], Meta.t()}
def run(q, flop, opts \\ [])
def run(
q,
%Flop{
before: nil,
first: first,
last: nil
} = flop,
opts
)
when is_integer(first) do
results = all(q, flop, opts)
{Enum.take(results, first), meta(results, flop, opts)}
end
def run(
q,
%Flop{
after: nil,
first: nil,
last: last
} = flop,
opts
)
when is_integer(last) do
results = all(q, flop, opts)
page_data =
results
|> Enum.take(last)
|> Enum.reverse()
{page_data, meta(results, flop, opts)}
end
def run(q, %Flop{} = flop, opts) do
{all(q, flop, opts), meta(q, flop, opts)}
end
@doc """
Validates the given flop parameters and retrieves the data and meta data on
success.
iex> {:ok, {[], %Flop.Meta{}}} =
...> Flop.validate_and_run(Flop.Pet, %Flop{}, for: Flop.Pet)
iex> {:error, %Flop.Meta{} = meta} =
...> Flop.validate_and_run(Flop.Pet, %Flop{limit: -1})
iex> meta.errors
[
limit: [
{"must be greater than %{number}",
[validation: :number, kind: :greater_than, number: 0]}
]
]
## Options
- `for`: Passed to `Flop.validate/2`.
- `repo`: The `Ecto.Repo` module. Required if no default repo is configured.
- `cursor_value_func`: An arity-2 function to be used to retrieve an
unencoded cursor value from a query result item and the `order_by` fields.
Defaults to `Flop.Cursor.get_cursor_from_node/2`.
"""
@doc since: "0.6.0"
@spec validate_and_run(Queryable.t(), map | Flop.t(), [option()]) ::
{:ok, {[any], Meta.t()}} | {:error, Meta.t()}
def validate_and_run(q, map_or_flop, opts \\ []) do
with {:ok, flop} <- validate(map_or_flop, opts) do
{:ok, run(q, flop, opts)}
end
end
@doc """
Same as `Flop.validate_and_run/3`, but raises on error.
"""
@doc since: "0.6.0"
@spec validate_and_run!(Queryable.t(), map | Flop.t(), [option()]) ::
{[any], Meta.t()}
def validate_and_run!(q, map_or_flop, opts \\ []) do
flop = validate!(map_or_flop, opts)
run(q, flop, opts)
end
@doc """
Returns the total count of entries matching the filter conditions of the
Flop.
The pagination and ordering options are disregarded.
iex> Flop.count(Flop.Pet, %Flop{}, repo: Flop.Repo)
0
You can also configure a default repo in your config files:
config :flop, repo: MyApp.Repo
This allows you to omit the third argument:
iex> Flop.count(Flop.Pet, %Flop{})
0
"""
@doc since: "0.6.0"
@spec count(Queryable.t(), Flop.t(), [option()]) :: non_neg_integer
def count(q, %Flop{} = flop, opts \\ []) do
apply_on_repo(:aggregate, "count", [filter(q, flop, opts), :count], opts)
end
@doc """
Returns meta information for the given query and flop that can be used for
building the pagination links.
iex> Flop.meta(Flop.Pet, %Flop{limit: 10}, repo: Flop.Repo)
%Flop.Meta{
current_offset: 0,
current_page: 1,
end_cursor: nil,
flop: %Flop{limit: 10},
has_next_page?: false,
has_previous_page?: false,
next_offset: nil,
next_page: nil,
page_size: 10,
previous_offset: nil,
previous_page: nil,
start_cursor: nil,
total_count: 0,
total_pages: 0
}
The function returns both the current offset and the current page, regardless
of the pagination type. If the offset lies between pages, the current page
number is rounded up, which means the values for `current_page` and
`next_page` can be identical. This can only occur with offset/limit-based
pagination and arbitrary offsets, but in that case you will render the
pagination links from the `previous_offset`, `current_offset` and
`next_offset` values anyway, so this is not a problem.
Unless cursor-based pagination is used, this function will run a query to
get the total count of matching records.
"""
@doc since: "0.6.0"
@spec meta(Queryable.t() | [any], Flop.t(), [option()]) :: Meta.t()
def meta(query_or_results, flop, opts \\ [])
def meta(
results,
%Flop{
first: first,
order_by: order_by,
before: nil,
last: nil
} = flop,
opts
)
when is_list(results) and is_integer(first) do
{start_cursor, end_cursor} =
results
|> Enum.take(first)
|> Cursor.get_cursors(order_by, opts)
%Meta{
flop: flop,
start_cursor: start_cursor,
end_cursor: end_cursor,
has_next_page?: length(results) > first,
has_previous_page?: !is_nil(flop.after),
page_size: first,
schema: opts[:for]
}
end
def meta(
results,
%Flop{
after: nil,
first: nil,
order_by: order_by,
last: last
} = flop,
opts
)
when is_list(results) and is_integer(last) do
{start_cursor, end_cursor} =
results
|> Enum.take(last)
|> Enum.reverse()
|> Cursor.get_cursors(order_by, opts)
%Meta{
flop: flop,
start_cursor: start_cursor,
end_cursor: end_cursor,
has_next_page?: !is_nil(flop.before),
has_previous_page?: length(results) > last,
page_size: last,
schema: opts[:for]
}
end
def meta(q, %Flop{} = flop, opts) do
repo = option_or_default(opts, :repo) || raise no_repo_error("meta")
opts = Keyword.put(opts, :repo, repo)
total_count = count(q, flop, opts)
page_size = flop.page_size || flop.limit
total_pages = get_total_pages(total_count, page_size)
current_offset = get_current_offset(flop)
current_page = get_current_page(flop, total_pages)
{has_previous_page?, previous_offset, previous_page} =
get_previous(current_offset, current_page, page_size)
{has_next_page?, next_offset, next_page} =
get_next(
current_offset,
current_page,
page_size,
total_count,
total_pages
)
%Meta{
current_offset: current_offset,
current_page: current_page,
flop: flop,
has_next_page?: has_next_page?,
has_previous_page?: has_previous_page?,
next_offset: next_offset,
next_page: next_page,
page_size: page_size,
previous_offset: previous_offset,
previous_page: previous_page,
schema: opts[:for],
total_count: total_count,
total_pages: total_pages
}
end
defp get_previous(offset, current_page, limit) do
has_previous? = offset > 0
previous_offset = if has_previous?, do: max(0, offset - limit), else: nil
previous_page = if current_page > 1, do: current_page - 1, else: nil
{has_previous?, previous_offset, previous_page}
end
defp get_next(_, _, nil = _page_size, _, _) do
{false, nil, nil}
end
defp get_next(current_offset, _, page_size, total_count, _)
when current_offset + page_size >= total_count do
{false, nil, nil}
end
defp get_next(current_offset, current_page, page_size, _, total_pages) do
{true, current_offset + page_size, min(total_pages, current_page + 1)}
end
defp get_total_pages(0, _), do: 0
defp get_total_pages(_, nil), do: 1
defp get_total_pages(total_count, limit), do: ceil(total_count / limit)
defp get_current_offset(%Flop{offset: nil, page: nil}), do: 0
defp get_current_offset(%Flop{offset: nil, page: page, page_size: page_size}),
do: (page - 1) * page_size
defp get_current_offset(%Flop{offset: offset}), do: offset
defp get_current_page(%Flop{offset: nil, page: nil}, _), do: 1
defp get_current_page(%Flop{offset: nil, page: page}, _), do: page
defp get_current_page(%Flop{limit: limit, offset: offset, page: nil}, total),
do: min(ceil(offset / limit) + 1, total)
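  # Worked example for the page rounding described in the `meta/3` docs: with
  # `limit: 10` and `offset: 25`, the offset lies between pages 3 and 4, so
  # `ceil(25 / 10) + 1 = 4` and the current page is rounded up to 4 (capped
  # at the total number of pages).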
## Ordering
@doc """
Applies the `order_by` and `order_directions` parameters of a `t:Flop.t/0`
to an `t:Ecto.Queryable.t/0`.
Used by `Flop.query/2`.
"""
@spec order_by(Queryable.t(), Flop.t(), [option()]) :: Queryable.t()
def order_by(q, flop, opts \\ [])
def order_by(q, %Flop{order_by: nil}, _opts), do: q
# For backwards cursor pagination
def order_by(
q,
%Flop{
last: last,
order_by: fields,
order_directions: directions,
first: nil,
after: nil,
offset: nil
},
opts
)
when is_integer(last) do
reversed_order =
fields
|> prepare_order(directions)
|> reverse_ordering()
case opts[:for] do
nil ->
Query.order_by(q, ^reversed_order)
module ->
struct = struct(module)
Enum.reduce(reversed_order, q, fn expr, acc_q ->
Flop.Schema.apply_order_by(struct, acc_q, expr)
end)
end
end
def order_by(
q,
%Flop{order_by: fields, order_directions: directions},
opts
) do
case opts[:for] do
nil ->
Query.order_by(q, ^prepare_order(fields, directions))
module ->
struct = struct(module)
fields
|> prepare_order(directions)
|> Enum.reduce(q, fn expr, acc_q ->
Flop.Schema.apply_order_by(struct, acc_q, expr)
end)
end
end
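  # Illustrative usage (a sketch; `Flop.Pet` as in the doctests above):
  #
  #     Flop.order_by(Flop.Pet, %Flop{order_by: [:name], order_directions: [:desc]})
  #     #=> an Ecto query ordered by [desc: :name]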
@spec prepare_order([atom], [order_direction()]) :: [
{order_direction(), atom}
]
defp prepare_order(fields, directions) do
directions = directions || []
field_count = length(fields)
direction_count = length(directions)
directions =
if direction_count < field_count,
do: directions ++ List.duplicate(:asc, field_count - direction_count),
else: directions
Enum.zip(directions, fields)
end
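  # Illustrative padding (matches the `t:Flop.t/0` docs: missing directions
  # default to `:asc`):
  #
  #     prepare_order([:name, :age], [:desc])
  #     #=> [desc: :name, asc: :age]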
## Pagination
@doc """
Applies the pagination parameters of a `t:Flop.t/0` to an
`t:Ecto.Queryable.t/0`.
The function supports both `offset`/`limit` based pagination and
`page`/`page_size` based pagination.
If you validated the `t:Flop.t/0` with `Flop.validate/1` before, you can be
sure that the given `t:Flop.t/0` only has pagination parameters set for one
pagination method. If you pass an unvalidated `t:Flop.t/0` that has
pagination parameters set for multiple pagination methods, this function
will arbitrarily only apply one of the pagination methods.
Used by `Flop.query/2`.
"""
@spec paginate(Queryable.t(), Flop.t(), [option()]) :: Queryable.t()
def paginate(q, flop, opts \\ [])
def paginate(q, %Flop{limit: limit, offset: offset}, _)
when (is_integer(limit) and limit >= 1) or
(is_integer(offset) and offset >= 0) do
q
|> limit(limit)
|> offset(offset)
end
def paginate(q, %Flop{page: page, page_size: page_size}, _)
when is_integer(page) and is_integer(page_size) and
page >= 1 and page_size >= 1 do
q
|> limit(page_size)
|> offset((page - 1) * page_size)
end
def paginate(
q,
%Flop{
first: first,
after: nil,
before: nil,
last: nil,
limit: nil
},
_
)
when is_integer(first),
do: limit(q, first + 1)
def paginate(
q,
%Flop{
first: first,
after: after_,
order_by: order_by,
order_directions: order_directions,
before: nil,
last: nil,
limit: nil
},
opts
)
when is_integer(first) do
orderings = prepare_order(order_by, order_directions)
q
|> apply_cursor(after_, orderings, opts)
|> limit(first + 1)
end
def paginate(
q,
%Flop{
last: last,
before: before,
order_by: order_by,
order_directions: order_directions,
first: nil,
after: nil,
limit: nil
},
opts
)
when is_integer(last) do
prepared_order_reversed =
order_by
|> prepare_order(order_directions)
|> reverse_ordering()
q
|> apply_cursor(before, prepared_order_reversed, opts)
|> limit(last + 1)
end
def paginate(q, _, _), do: q
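  # Illustrative usage: page/page_size parameters are translated into
  # limit/offset under the hood.
  #
  #     Flop.paginate(Flop.Pet, %Flop{page: 3, page_size: 10})
  #     #=> an Ecto query with limit: ^10, offset: ^20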
## Offset/limit pagination
@spec limit(Queryable.t(), pos_integer | nil) :: Queryable.t()
defp limit(q, nil), do: q
defp limit(q, limit), do: Query.limit(q, ^limit)
@spec offset(Queryable.t(), non_neg_integer | nil) :: Queryable.t()
defp offset(q, nil), do: q
defp offset(q, offset), do: Query.offset(q, ^offset)
## Cursor pagination helpers
@spec apply_cursor(
Queryable.t(),
map() | nil,
[order_direction()],
keyword
) :: Queryable.t()
defp apply_cursor(q, nil, _, _), do: q
defp apply_cursor(q, cursor, ordering, opts) do
cursor = Cursor.decode!(cursor)
where_dynamic =
case opts[:for] do
nil ->
cursor_dynamic(ordering, cursor)
module ->
module
|> struct()
|> Flop.Schema.cursor_dynamic(ordering, cursor)
end
Query.where(q, ^where_dynamic)
end
defp cursor_dynamic([], _), do: true
defp cursor_dynamic([{direction, field}], cursor) do
field_cursor = cursor[field]
if is_nil(field_cursor) do
true
else
case direction do
dir when dir in [:asc, :asc_nulls_first, :asc_nulls_last] ->
Query.dynamic([r], field(r, ^field) > ^field_cursor)
dir when dir in [:desc, :desc_nulls_first, :desc_nulls_last] ->
Query.dynamic([r], field(r, ^field) < ^field_cursor)
end
end
end
defp cursor_dynamic([{direction, field} | [{_, _} | _] = tail], cursor) do
field_cursor = cursor[field]
if is_nil(field_cursor) do
cursor_dynamic(tail, cursor)
else
case direction do
dir when dir in [:asc, :asc_nulls_first, :asc_nulls_last] ->
Query.dynamic(
[r],
field(r, ^field) >= ^field_cursor and
(field(r, ^field) > ^field_cursor or
^cursor_dynamic(tail, cursor))
)
dir when dir in [:desc, :desc_nulls_first, :desc_nulls_last] ->
Query.dynamic(
[r],
field(r, ^field) <= ^field_cursor and
(field(r, ^field) < ^field_cursor or
^cursor_dynamic(tail, cursor))
)
end
end
end
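  # Illustrative expansion (a sketch of the generated condition): for the
  # ordering `[asc: :name, asc: :id]` and the decoded cursor
  # `%{name: "Bob", id: 3}`, the dynamic roughly reads
  #
  #     name >= "Bob" and (name > "Bob" or id > 3)
  #
  # i.e. it matches records strictly after the cursor row in the given order.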
@spec reverse_ordering([order_direction()]) :: [order_direction()]
defp reverse_ordering(order_directions) do
Enum.map(order_directions, fn
{:desc, field} -> {:asc, field}
{:desc_nulls_last, field} -> {:asc_nulls_first, field}
{:desc_nulls_first, field} -> {:asc_nulls_last, field}
{:asc, field} -> {:desc, field}
{:asc_nulls_last, field} -> {:desc_nulls_first, field}
{:asc_nulls_first, field} -> {:desc_nulls_last, field}
end)
end
## Filter
@doc """
Applies the `filter` parameter of a `t:Flop.t/0` to an `t:Ecto.Queryable.t/0`.
Used by `Flop.query/2`.
"""
@spec filter(Queryable.t(), Flop.t(), [option()]) :: Queryable.t()
def filter(q, flop, opts \\ [])
def filter(q, %Flop{filters: nil}, _), do: q
def filter(q, %Flop{filters: []}, _), do: q
def filter(q, %Flop{filters: filters}, opts) when is_list(filters) do
schema_struct =
case opts[:for] do
nil -> nil
module -> struct(module)
end
conditions =
Enum.reduce(filters, true, &Builder.filter(schema_struct, &1, &2))
Query.where(q, ^conditions)
end
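  # Illustrative usage (a sketch; `Flop.Pet` as in the doctests above):
  #
  #     flop = %Flop{filters: [%Flop.Filter{field: :name, op: :==, value: "Harry"}]}
  #     Flop.filter(Flop.Pet, flop)
  #     #=> an Ecto query with a `p0.name == ^"Harry"` where clause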
## Validation
@doc """
Validates a `t:Flop.t/0`.
## Examples
iex> params = %{"limit" => 10, "offset" => 0, "texture" => "fluffy"}
iex> Flop.validate(params)
{:ok,
%Flop{
filters: [],
limit: 10,
offset: 0,
order_by: nil,
order_directions: nil,
page: nil,
page_size: nil
}}
iex> flop = %Flop{offset: -1}
iex> {:error, %Flop.Meta{} = meta} = Flop.validate(flop)
iex> meta.errors
[
offset: [
{"must be greater than or equal to %{number}",
[validation: :number, kind: :greater_than_or_equal_to, number: 0]}
]
]
It also makes sure that only one pagination method is used.
iex> params = %{limit: 10, offset: 0, page: 5, page_size: 10}
iex> {:error, %Flop.Meta{} = meta} = Flop.validate(params)
iex> meta.errors
[limit: [{"cannot combine multiple pagination types", []}]]
If you derived `Flop.Schema` in your Ecto schema to define the filterable
and sortable fields, you can pass the module name to the function to validate
that only allowed fields are used. The function will also apply any default
values set for the schema.
iex> params = %{"order_by" => ["species"]}
iex> {:error, %Flop.Meta{} = meta} = Flop.validate(params, for: Flop.Pet)
iex> [order_by: [{msg, [_, {_, enum}]}]] = meta.errors
iex> msg
"has an invalid entry"
iex> enum
[:name, :age, :owner_name, :owner_age]
Note that currently, trying to use an existing field that is not allowed as
seen above will result in the error message `has an invalid entry`, while
trying to use a field name that does not exist in the schema (or more
precisely: a field name that doesn't exist as an atom) will result in
the error message `is invalid`. This might change in the future.
"""
@spec validate(Flop.t() | map, [option()]) ::
{:ok, Flop.t()} | {:error, Meta.t()}
def validate(flop_or_map, opts \\ [])
def validate(%Flop{} = flop, opts) do
flop
|> flop_struct_to_map()
|> validate(opts)
end
def validate(%{} = params, opts) do
result =
params
|> Flop.Validation.changeset(opts)
|> apply_action(:replace)
case result do
{:ok, _} = r ->
r
{:error, %Changeset{} = changeset} ->
Logger.debug("Invalid Flop: #{inspect(changeset)}")
{:error,
%Meta{
errors: convert_errors(changeset),
params: convert_params(params),
schema: opts[:for]
}}
end
end
defp convert_errors(changeset) do
changeset
|> Changeset.traverse_errors(& &1)
|> map_to_keyword()
end
defp map_to_keyword(%{} = map) do
Enum.into(map, [], fn {key, value} -> {key, map_to_keyword(value)} end)
end
defp map_to_keyword(list) when is_list(list) do
Enum.map(list, &map_to_keyword/1)
end
defp map_to_keyword(value), do: value
defp flop_struct_to_map(%Flop{} = flop) do
flop
|> Map.from_struct()
|> Map.update!(:filters, &filters_to_maps/1)
|> Enum.reject(fn {_, value} -> is_nil(value) end)
|> Enum.into(%{})
end
defp filters_to_maps(nil), do: nil
defp filters_to_maps(filters) when is_list(filters),
do: Enum.map(filters, &filter_to_map/1)
defp filter_to_map(%Filter{} = filter) do
filter
|> Map.from_struct()
|> Enum.reject(fn {_, value} -> is_nil(value) end)
|> Enum.into(%{})
end
defp filter_to_map(%{} = filter), do: filter
defp convert_params(params) do
params
|> map_to_string_keys()
|> filters_to_list()
end
defp filters_to_list(%{"filters" => filters} = params) when is_map(filters) do
filters =
filters
|> Enum.map(fn {index, filter} -> {String.to_integer(index), filter} end)
|> Enum.sort_by(fn {index, _} -> index end)
|> Enum.map(fn {_, filter} -> filter end)
Map.put(params, "filters", filters)
end
defp filters_to_list(params), do: params
defp map_to_string_keys(%{} = params) do
Enum.into(params, %{}, fn
{key, value} when is_atom(key) ->
{Atom.to_string(key), map_to_string_keys(value)}
{key, value} when is_binary(key) ->
{key, map_to_string_keys(value)}
end)
end
defp map_to_string_keys(values) when is_list(values),
do: Enum.map(values, &map_to_string_keys/1)
defp map_to_string_keys(value), do: value
@doc """
Same as `Flop.validate/2`, but raises an `Ecto.InvalidChangesetError` if the
parameters are invalid.
"""
@doc since: "0.5.0"
@spec validate!(Flop.t() | map, [option()]) :: Flop.t()
def validate!(flop_or_map, opts \\ []) do
case validate(flop_or_map, opts) do
{:ok, flop} ->
flop
{:error, %Meta{errors: errors, params: params}} ->
raise Flop.InvalidParamsError, errors: errors, params: params
end
end
@doc """
Sets the page value of a `Flop` struct while also removing/converting
pagination parameters for other pagination types.
iex> set_page(%Flop{page: 2, page_size: 10}, 6)
%Flop{page: 6, page_size: 10}
iex> set_page(%Flop{limit: 10, offset: 20}, 8)
%Flop{limit: nil, offset: nil, page: 8, page_size: 10}
iex> set_page(%Flop{page: 2, page_size: 10}, "6")
%Flop{page: 6, page_size: 10}
The page number will not be allowed to go below 1.
iex> set_page(%Flop{}, -5)
%Flop{page: 1}
"""
@doc since: "0.12.0"
@spec set_page(Flop.t(), pos_integer | binary) :: Flop.t()
def set_page(%Flop{} = flop, page) when is_integer(page) do
%{
flop
| after: nil,
before: nil,
first: nil,
last: nil,
limit: nil,
offset: nil,
page_size: flop.page_size || flop.limit || flop.first || flop.last,
page: max(page, 1)
}
end
def set_page(%Flop{} = flop, page) when is_binary(page) do
set_page(flop, String.to_integer(page))
end
@doc """
Sets the page of a Flop struct to the previous page, but not less than 1.
## Examples
iex> to_previous_page(%Flop{page: 5})
%Flop{page: 4}
iex> to_previous_page(%Flop{page: 1})
%Flop{page: 1}
iex> to_previous_page(%Flop{page: -2})
%Flop{page: 1}
"""
@doc since: "0.15.0"
@spec to_previous_page(Flop.t()) :: Flop.t()
def to_previous_page(%Flop{page: 1} = flop), do: flop
def to_previous_page(%Flop{page: page} = flop)
when is_integer(page) and page < 1,
do: %{flop | page: 1}
def to_previous_page(%Flop{page: page} = flop) when is_integer(page),
do: %{flop | page: page - 1}
@doc """
Sets the page of a Flop struct to the next page.
If the total number of pages is given as the second argument, the page number
will not be increased if the last page has already been reached. You can get
the total number of pages from the `Flop.Meta` struct.
## Examples
iex> to_next_page(%Flop{page: 5})
%Flop{page: 6}
iex> to_next_page(%Flop{page: 5}, 6)
%Flop{page: 6}
iex> to_next_page(%Flop{page: 6}, 6)
%Flop{page: 6}
iex> to_next_page(%Flop{page: 7}, 6)
%Flop{page: 6}
iex> to_next_page(%Flop{page: -5})
%Flop{page: 1}
"""
@doc since: "0.15.0"
@spec to_next_page(Flop.t(), non_neg_integer | nil) :: Flop.t()
def to_next_page(flop, total_pages \\ nil)
def to_next_page(%Flop{page: page} = flop, _)
when is_integer(page) and page < 0,
do: %{flop | page: 1}
def to_next_page(%Flop{page: page} = flop, nil), do: %{flop | page: page + 1}
def to_next_page(%Flop{page: page} = flop, total_pages)
when is_integer(total_pages) and page < total_pages,
do: %{flop | page: page + 1}
def to_next_page(%Flop{} = flop, total_pages)
when is_integer(total_pages),
do: %{flop | page: total_pages}
@doc """
Sets the offset value of a `Flop` struct while also removing/converting
pagination parameters for other pagination types.
iex> set_offset(%Flop{limit: 10, offset: 10}, 20)
%Flop{offset: 20, limit: 10}
iex> set_offset(%Flop{page: 5, page_size: 10}, 20)
%Flop{limit: 10, offset: 20, page: nil, page_size: nil}
iex> set_offset(%Flop{limit: 10, offset: 10}, "20")
%Flop{offset: 20, limit: 10}
The offset will not be allowed to go below 0.
iex> set_offset(%Flop{}, -5)
%Flop{offset: 0}
"""
@doc since: "0.15.0"
@spec set_offset(Flop.t(), non_neg_integer | binary) :: Flop.t()
def set_offset(%Flop{} = flop, offset) when is_integer(offset) do
%{
flop
| after: nil,
before: nil,
first: nil,
last: nil,
limit: flop.limit || flop.page_size || flop.first || flop.last,
offset: max(offset, 0),
page_size: nil,
page: nil
}
end
def set_offset(%Flop{} = flop, offset) when is_binary(offset) do
set_offset(flop, String.to_integer(offset))
end
@doc """
Sets the offset of a Flop struct to the previous page depending on the limit.
## Examples
iex> to_previous_offset(%Flop{offset: 20, limit: 10})
%Flop{offset: 10, limit: 10}
iex> to_previous_offset(%Flop{offset: 5, limit: 10})
%Flop{offset: 0, limit: 10}
iex> to_previous_offset(%Flop{offset: -2, limit: 10})
%Flop{offset: 0, limit: 10}
"""
@doc since: "0.15.0"
@spec to_previous_offset(Flop.t()) :: Flop.t()
def to_previous_offset(%Flop{offset: 0} = flop), do: flop
def to_previous_offset(%Flop{offset: offset, limit: limit} = flop)
when is_integer(limit) and is_integer(offset),
do: %{flop | offset: max(offset - limit, 0)}
@doc """
Sets the offset of a Flop struct to the next page depending on the limit.
If the total count is given as the second argument, the offset will not be
increased if the last page has already been reached. You can get the total
count from the `Flop.Meta` struct. If the Flop has an offset beyond the total
count, the offset will be set to the last page.
## Examples
iex> to_next_offset(%Flop{offset: 10, limit: 5})
%Flop{offset: 15, limit: 5}
iex> to_next_offset(%Flop{offset: 15, limit: 5}, 21)
%Flop{offset: 20, limit: 5}
iex> to_next_offset(%Flop{offset: 15, limit: 5}, 20)
%Flop{offset: 15, limit: 5}
iex> to_next_offset(%Flop{offset: 28, limit: 5}, 22)
%Flop{offset: 20, limit: 5}
iex> to_next_offset(%Flop{offset: -5, limit: 20})
%Flop{offset: 0, limit: 20}
"""
@doc since: "0.15.0"
@spec to_next_offset(Flop.t(), non_neg_integer | nil) :: Flop.t()
def to_next_offset(flop, total_count \\ nil)
def to_next_offset(%Flop{limit: limit, offset: offset} = flop, _)
when is_integer(limit) and is_integer(offset) and offset < 0,
do: %{flop | offset: 0}
def to_next_offset(%Flop{limit: limit, offset: offset} = flop, nil)
when is_integer(limit) and is_integer(offset),
do: %{flop | offset: offset + limit}
def to_next_offset(%Flop{limit: limit, offset: offset} = flop, total_count)
when is_integer(limit) and
is_integer(offset) and
is_integer(total_count) and offset >= total_count do
%{flop | offset: (ceil(total_count / limit) - 1) * limit}
end
def to_next_offset(%Flop{limit: limit, offset: offset} = flop, total_count)
when is_integer(limit) and
is_integer(offset) and
is_integer(total_count) do
case offset + limit do
new_offset when new_offset >= total_count -> flop
new_offset -> %{flop | offset: new_offset}
end
end
@doc """
Takes a `Flop.Meta` struct and returns a `Flop` struct with updated cursor
pagination params for going to either the previous or the next page.
See `to_previous_cursor/1` and `to_next_cursor/1` for details.
## Examples
iex> set_cursor(
...> %Flop.Meta{
...> flop: %Flop{first: 5, after: "a"},
...> has_previous_page?: true, start_cursor: "b"
...> },
...> :previous
...> )
%Flop{last: 5, before: "b"}
iex> set_cursor(
...> %Flop.Meta{
...> flop: %Flop{first: 5, after: "a"},
...> has_next_page?: true, end_cursor: "b"
...> },
...> :next
...> )
%Flop{first: 5, after: "b"}
"""
@doc since: "0.15.0"
@spec set_cursor(Meta.t(), :previous | :next) :: Flop.t()
def set_cursor(%Meta{} = meta, :previous), do: to_previous_cursor(meta)
def set_cursor(%Meta{} = meta, :next), do: to_next_cursor(meta)
@doc """
Takes a `Flop.Meta` struct and returns a `Flop` struct with updated cursor
pagination params for going to the previous page.
If there is no previous page, the `Flop` struct is returned unchanged.
## Examples
iex> to_previous_cursor(
...> %Flop.Meta{
...> flop: %Flop{first: 5, after: "a"},
...> has_previous_page?: true, start_cursor: "b"
...> }
...> )
%Flop{last: 5, before: "b"}
iex> to_previous_cursor(
...> %Flop.Meta{
...> flop: %Flop{last: 5, before: "b"},
...> has_previous_page?: true, start_cursor: "a"
...> }
...> )
%Flop{last: 5, before: "a"}
iex> to_previous_cursor(
...> %Flop.Meta{
...> flop: %Flop{first: 5, after: "b"},
...> has_previous_page?: false, start_cursor: "a"
...> }
...> )
%Flop{first: 5, after: "b"}
"""
@doc since: "0.15.0"
@spec to_previous_cursor(Meta.t()) :: Flop.t()
def to_previous_cursor(%Meta{flop: flop, has_previous_page?: false}), do: flop
def to_previous_cursor(%Meta{
flop: flop,
has_previous_page?: true,
start_cursor: start_cursor
})
when is_binary(start_cursor) do
%{
flop
| before: start_cursor,
last: flop.last || flop.first || flop.page_size || flop.limit,
after: nil,
first: nil,
page: nil,
page_size: nil,
limit: nil,
offset: nil
}
end
@doc """
Takes a `Flop.Meta` struct and returns a `Flop` struct with updated cursor
pagination params for going to the next page.
If there is no next page, the `Flop` struct is returned unchanged.
## Examples
iex> to_next_cursor(
...> %Flop.Meta{
...> flop: %Flop{first: 5, after: "a"},
...> has_next_page?: true, end_cursor: "b"
...> }
...> )
%Flop{first: 5, after: "b"}
iex> to_next_cursor(
...> %Flop.Meta{
...> flop: %Flop{last: 5, before: "b"},
...> has_next_page?: true, end_cursor: "a"
...> }
...> )
%Flop{first: 5, after: "a"}
iex> to_next_cursor(
...> %Flop.Meta{
...> flop: %Flop{first: 5, after: "a"},
...> has_next_page?: false, start_cursor: "b"
...> }
...> )
%Flop{first: 5, after: "a"}
"""
@doc since: "0.15.0"
@spec to_next_cursor(Meta.t()) :: Flop.t()
def to_next_cursor(%Meta{flop: flop, has_next_page?: false}), do: flop
def to_next_cursor(%Meta{
flop: flop,
has_next_page?: true,
end_cursor: end_cursor
})
when is_binary(end_cursor) do
%{
flop
| after: end_cursor,
first: flop.first || flop.last || flop.page_size || flop.limit,
before: nil,
last: nil,
page: nil,
page_size: nil,
limit: nil,
offset: nil
}
end
@doc """
Removes the `after` and `before` cursors from a Flop struct.
## Example
iex> reset_cursors(%Flop{after: "A"})
%Flop{}
iex> reset_cursors(%Flop{before: "A"})
%Flop{}
"""
@doc since: "0.15.0"
@spec reset_cursors(Flop.t()) :: Flop.t()
def reset_cursors(%Flop{} = flop), do: %{flop | after: nil, before: nil}
@doc """
Removes all filters from a Flop struct.
## Example
iex> reset_filters(%Flop{filters: [
...> %Flop.Filter{field: :name, value: "Jim"}
...> ]})
%Flop{filters: []}
"""
@doc since: "0.15.0"
@spec reset_filters(Flop.t()) :: Flop.t()
def reset_filters(%Flop{} = flop), do: %{flop | filters: []}
@doc """
Returns the current order direction for the given field.
## Examples
iex> flop = %Flop{order_by: [:name, :age], order_directions: [:desc]}
iex> current_order(flop, :name)
:desc
iex> current_order(flop, :age)
:asc
iex> current_order(flop, :species)
nil
"""
@doc since: "0.15.0"
@spec current_order(Flop.t(), atom) :: order_direction() | nil
def current_order(
%Flop{order_by: order_by, order_directions: order_directions},
field
)
when is_atom(field) do
get_order_direction(order_directions, get_index(order_by, field))
end
@doc """
Removes the order parameters from a Flop struct.
## Example
iex> reset_order(%Flop{order_by: [:name], order_directions: [:asc]})
%Flop{order_by: nil, order_directions: nil}
"""
@doc since: "0.15.0"
@spec reset_order(Flop.t()) :: Flop.t()
def reset_order(%Flop{} = flop),
do: %{flop | order_by: nil, order_directions: nil}
@doc """
Updates the `order_by` and `order_directions` values of a `Flop` struct.
- If the field is not in the current `order_by` value, it will be prepended to
the list. The order direction for the field will be set to `:asc`.
- If the field is already at the front of the `order_by` list, the order
direction will be reversed.
- If the field is already in the list, but not at the front, it will be moved
to the front and the order direction will be set to `:asc`.
## Example
iex> flop = push_order(%Flop{}, :name)
iex> flop.order_by
[:name]
iex> flop.order_directions
[:asc]
iex> flop = push_order(flop, :age)
iex> flop.order_by
[:age, :name]
iex> flop.order_directions
[:asc, :asc]
iex> flop = push_order(flop, :age)
iex> flop.order_by
[:age, :name]
iex> flop.order_directions
[:desc, :asc]
iex> flop = push_order(flop, :species)
iex> flop.order_by
[:species, :age, :name]
iex> flop.order_directions
[:asc, :desc, :asc]
iex> flop = push_order(flop, :age)
iex> flop.order_by
[:age, :species, :name]
iex> flop.order_directions
[:asc, :asc, :asc]
If a string is passed as the second argument, it will be converted to an atom
using `String.to_existing_atom/1`. If the atom does not exist, the `Flop`
struct will be returned unchanged.
iex> flop = push_order(%Flop{}, "name")
iex> flop.order_by
[:name]
iex> flop = push_order(%Flop{}, "this_atom_does_not_exist")
iex> flop.order_by
nil
Since the pagination cursor depends on the sort order, the `:before` and
`:after` parameters are reset.
iex> push_order(%Flop{order_by: [:id], after: "ABC"}, :name)
%Flop{order_by: [:name, :id], order_directions: [:asc], after: nil}
iex> push_order(%Flop{order_by: [:id], before: "DEF"}, :name)
%Flop{order_by: [:name, :id], order_directions: [:asc], before: nil}
"""
@spec push_order(Flop.t(), atom | String.t()) :: Flop.t()
@doc since: "0.10.0"
def push_order(
%Flop{order_by: order_by, order_directions: order_directions} = flop,
field
)
when is_atom(field) do
previous_index = get_index(order_by, field)
previous_direction = get_order_direction(order_directions, previous_index)
new_direction = new_order_direction(previous_index, previous_direction)
{order_by, order_directions} =
get_new_order(
order_by,
order_directions,
field,
new_direction,
previous_index
)
%{
flop
| after: nil,
before: nil,
order_by: order_by,
order_directions: order_directions
}
end
def push_order(flop, field) when is_binary(field) do
push_order(flop, String.to_existing_atom(field))
rescue
_e in ArgumentError -> flop
end
defp get_index(nil, _field), do: nil
defp get_index(order_by, field), do: Enum.find_index(order_by, &(&1 == field))
defp get_order_direction(_, nil), do: nil
defp get_order_direction(nil, _), do: :asc
defp get_order_direction(directions, index),
do: Enum.at(directions, index, :asc)
defp new_order_direction(0, :asc), do: :desc
defp new_order_direction(0, :asc_nulls_first), do: :desc_nulls_last
defp new_order_direction(0, :asc_nulls_last), do: :desc_nulls_first
defp new_order_direction(0, :desc), do: :asc
defp new_order_direction(0, :desc_nulls_first), do: :asc_nulls_last
defp new_order_direction(0, :desc_nulls_last), do: :asc_nulls_first
defp new_order_direction(_, _), do: :asc
defp get_new_order(
order_by,
order_directions,
field,
new_direction,
previous_index
) do
{order_by, order_directions} =
if previous_index do
{List.delete_at(order_by, previous_index),
List.delete_at(order_directions, previous_index)}
else
{order_by, order_directions}
end
{[field | order_by || []], [new_direction | order_directions || []]}
end
defp apply_on_repo(repo_fn, flop_fn, args, opts) do
repo = option_or_default(opts, :repo) || raise no_repo_error(flop_fn)
opts =
if prefix = option_or_default(opts, :prefix) do
[prefix: prefix]
else
[]
end
apply(repo, repo_fn, args ++ [opts])
end
defp option_or_default(opts, key) do
opts[key] || Application.get_env(:flop, key)
end
@doc """
Returns the option with the given key.
The look-up order is:
1. the keyword list passed as the second argument
2. the schema module that derives `Flop.Schema`, if the passed list includes
the `:for` option
3. the application environment
4. the default passed as the last argument
"""
@doc since: "0.11.0"
@spec get_option(atom, [option()], any) :: any
def get_option(key, opts, default \\ nil) do
case opts[key] do
nil ->
case schema_option(opts[:for], key) do
nil -> global_option(key, default)
v -> v
end
v ->
v
end
end
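  # Illustrative look-up (assuming `Flop.Pet` derives `Flop.Schema`, as in
  # the doctests above):
  #
  #     Flop.get_option(:max_limit, for: Flop.Pet)
  #     # 1. opts[:max_limit]                       - not set here
  #     # 2. Flop.Schema.max_limit/1                - from the schema given via :for
  #     # 3. Application.get_env(:flop, :max_limit)
  #     # 4. the default argument (nil)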
defp schema_option(module, key)
when is_atom(module) and module != nil and
key in [
:default_limit,
:default_order,
:filterable_fields,
:max_limit,
:pagination_types,
:sortable
] do
apply(Flop.Schema, key, [struct(module)])
end
defp schema_option(_, _), do: nil
defp global_option(key, default) when is_atom(key) do
Application.get_env(:flop, key, default)
end
@doc """
Takes key/value filter parameters at the root of a map, converts them into
a list of filter parameter maps, and nests them under the `:filters` key.
The second argument is a list of fields as atoms.
The `opts` argument is passed to `map_to_filter_params/2`.
## Examples
iex> nest_filters(%{name: "Peter", page_size: 10}, [:name])
%{filters: [%{field: :name, op: :==, value: "Peter"}], page_size: 10}
iex> nest_filters(%{"name" => "Peter"}, [:name])
%{"filters" => [%{"field" => "name", "op" => :==, "value" => "Peter"}]}
iex> nest_filters(%{name: "Peter"}, [:name], operators: %{name: :!=})
%{filters: [%{field: :name, op: :!=, value: "Peter"}]}
"""
@doc since: "0.15.0"
def nest_filters(%{} = args, fields, opts \\ []) when is_list(fields) do
fields = fields ++ Enum.map(fields, &Atom.to_string/1)
filters =
args
|> Map.take(fields)
|> map_to_filter_params(opts)
key = if has_atom_keys?(args), do: :filters, else: "filters"
args
|> Map.put(key, filters)
|> Map.drop(fields)
end
defp has_atom_keys?(%{} = map) do
map
|> Map.keys()
|> List.first()
|> is_atom()
end
@doc """
Converts a map of filter conditions into a list of Flop filter params.
The default operator is `:==`. `nil` values are excluded from the result.
iex> map_to_filter_params(%{name: "George", age: 8, species: nil})
[
%{field: :age, op: :==, value: 8},
%{field: :name, op: :==, value: "George"}
]
iex> map_to_filter_params(%{"name" => "George", "age" => 8, "cat" => true})
[
%{"field" => "age", "op" => :==, "value" => 8},
%{"field" => "cat", "op" => :==, "value" => true},
%{"field" => "name", "op" => :==, "value" => "George"}
]
You can optionally pass a mapping from field names to operators as a map
with atom keys.
iex> map_to_filter_params(
...> %{name: "George", age: 8, species: nil},
...> operators: %{name: :ilike_and}
...> )
[
%{field: :age, op: :==, value: 8},
%{field: :name, op: :ilike_and, value: "George"}
]
iex> map_to_filter_params(
...> %{"name" => "George", "age" => 8, "cat" => true},
...> operators: %{name: :ilike_and, age: :<=}
...> )
[
%{"field" => "age", "op" => :<=, "value" => 8},
%{"field" => "cat", "op" => :==, "value" => true},
%{"field" => "name", "op" => :ilike_and, "value" => "George"}
]
"""
@doc since: "0.14.0"
@spec map_to_filter_params(map, keyword) :: [map]
def map_to_filter_params(%{} = map, opts \\ []) do
operators = opts[:operators]
map
|> Stream.reject(fn
{_, nil} -> true
_ -> false
end)
|> Enum.map(fn
{field, value} when is_atom(field) ->
%{
field: field,
op: op_from_mapping(field, operators),
value: value
}
{field, value} when is_binary(field) ->
%{
"field" => field,
"op" => op_from_mapping(field, operators),
"value" => value
}
end)
end
defp op_from_mapping(_field, nil), do: :==
defp op_from_mapping(field, %{} = operators) when is_atom(field) do
Map.get(operators, field, :==)
end
defp op_from_mapping(field, %{} = operators) when is_binary(field) do
atom_key = String.to_existing_atom(field)
Map.get(operators, atom_key, :==)
rescue
ArgumentError -> :==
end
# coveralls-ignore-start
defp no_repo_error(function_name),
do: """
No repo specified. You can specify the repo either by passing it
explicitly:
Flop.#{function_name}(MyApp.Item, %Flop{}, repo: MyApp.Repo)
Or you can configure a default repo in your config:
config :flop, repo: MyApp.Repo
"""
# coveralls-ignore-end
end | lib/flop.ex | 0.806853 | 0.617686 | flop.ex | starcoder |
defmodule PersistentList.Day02 do
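  @moduledoc """
  A persistent (immutable) singly linked list built from `:head`/`:tail`
  cells (a Day 2 exercise).

  Note the cell-oriented naming: `append/2` conses an item onto the front in
  O(1), while `prepend/2` walks the list and adds the item at the back.
  """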
alias PersistentList.Day02, as: List
defstruct [:head, :tail]
defimpl String.Chars, for: PersistentList.Day02 do
def to_string(list), do: "[" <> stringify(list) <> "]"
defp stringify(%List{head: nil}), do: ""
defp stringify(
%List{
head: head,
tail: %List{
head: nil
}
}
), do: "#{head}"
defp stringify(%List{head: head, tail: tail}), do: "#{head}, " <> stringify(tail)
end
def new(), do: %List{}
def append(list, item), do: %List{head: item, tail: list}
def prepend(%List{head: nil, tail: nil} = empty, item),
do: empty
|> append(item)
def prepend(%List{head: head, tail: tail}, item),
do: tail
|> prepend(item)
|> append(head)
def concat(%List{head: nil, tail: nil}, other), do: other
def concat(%List{head: head, tail: tail}, other),
do: tail
|> concat(other)
|> append(head)
def drop(%List{head: nil} = empty, _), do: empty
def drop(list, num) when num == 0, do: list
def drop(%List{tail: tail}, num),
do: tail
|> drop(num - 1)
def drop_while(%List{head: nil} = empty, _), do: empty
  def drop_while(%List{head: head, tail: tail} = list, predicate) do
    if predicate.(head),
      do: tail |> drop_while(predicate),
      else: list
  end
def take(%List{head: nil} = empty, _), do: empty
def take(_, num) when num == 0, do: %List{}
def take(%List{head: head, tail: tail}, num),
do: tail
|> take(num - 1)
|> append(head)
def take_while(%List{head: nil} = empty, _), do: empty
def take_while(%List{head: head, tail: tail}, predicate), do:
if predicate.(head),
do: tail
|> take_while(predicate)
|> append(head),
else: %List{}
def filter(%List{head: nil} = empty, _), do: empty
  # Keeps the elements for which the predicate holds.
  def filter(%List{head: head, tail: tail}, predicate) do
    if predicate.(head),
      do: tail |> filter(predicate) |> append(head),
      else: tail |> filter(predicate)
  end
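  # Illustrative usage (a sketch; note that `append/2` adds to the front):
  #
  #     list = List.new() |> List.append(3) |> List.append(2) |> List.append(1)
  #     to_string(list)                          #=> "[1, 2, 3]"
  #     to_string(List.take(list, 2))            #=> "[1, 2]"
  #     to_string(List.filter(list, &(&1 > 1)))  #=> "[2, 3]"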
end | persistent_list/lib/persistent_list/day02.ex | 0.583441 | 0.461502 | day02.ex | starcoder |
defmodule Meeseeks.Selector.Combinator do
@moduledoc """
Combinator structs package some method for finding related nodes and a
`Meeseeks.Selector` to be run on found nodes.
For instance, the css selector `ul > li` contains the combinator `> li`,
which roughly translates to "find a node's children and match any that are
`li`s."
In Meeseeks, this combinator could be represented as:
```elixir
alias Meeseeks.Selector.Combinator
alias Meeseeks.Selector.Element
%Combinator.ChildElements{
selector: %Element{selectors: [%Element.Tag{value: "li"}]}}
```
When defining a combinator using `use Meeseeks.Selector.Combinator`, the
default implementation of `selector/1` expects the selector to be stored
in field `selector`. If this is different in your struct, you must
implement `selector/1`.
## Examples
```elixir
defmodule Selector.Combinator.Parent do
use Meeseeks.Selector.Combinator
defstruct selector: nil
def next(_combinator, node, _document) do
node.parent
end
end
```
"""
alias Meeseeks.{Document, Selector}
@type t :: struct
@doc """
Invoked in order to find the node or nodes that a combinator wishes its
selector to be run on.
Returns the applicable node or nodes, or `nil` if there are no applicable
nodes.
"""
@callback next(combinator :: t, node :: Document.node_t(), document :: Document.t()) ::
[Document.node_t()]
| Document.node_t()
| nil
| no_return
@doc """
Invoked to return the combinator's selector.
"""
@callback selector(combinator :: t) :: Selector.t()
# next
@doc """
Finds the node or nodes that a combinator wishes its selector to be run on.
Returns the applicable node or nodes, or `nil` if there are no applicable
nodes.
"""
@spec next(t, Document.node_t(), Document.t()) ::
[Document.node_t()] | Document.node_t() | nil | no_return
def next(%{__struct__: struct} = combinator, node, document) do
struct.next(combinator, node, document)
end
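  # Illustrative dispatch (using the `Parent` combinator from the moduledoc
  # example; `some_selector`, `node` and `document` are placeholders):
  #
  #     combinator = %Selector.Combinator.Parent{selector: some_selector}
  #     Selector.Combinator.next(combinator, node, document)
  #     #=> delegates to Selector.Combinator.Parent.next/3, i.e. node.parent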
# combinator
@doc """
Returns the combinator's selector.
"""
@spec selector(t) :: Selector.t()
def selector(%{__struct__: struct} = combinator) do
struct.selector(combinator)
end
# __using__
@doc false
defmacro __using__(_) do
quote do
@behaviour Selector.Combinator
@impl Selector.Combinator
def next(_, _, _), do: raise("next/3 not implemented")
@impl Selector.Combinator
def selector(combinator), do: combinator.selector
defoverridable next: 3, selector: 1
end
end
end | lib/meeseeks/selector/combinator.ex | 0.900717 | 0.726013 | combinator.ex | starcoder |
defmodule Protobuf.Parser do
defmodule ParserError do
defexception [:message]
end
def parse_files!(files, options \\ []) do
files
|> Enum.flat_map(fn path ->
schema = File.read!(path)
parse!(path, schema, options)
end)
|> finalize!(options)
end
def parse_string!(file, string, options \\ []) do
file
|> parse!(string, options)
|> finalize!(options)
end
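  # Illustrative usage (a minimal sketch; the returned definitions follow
  # gpb's internal format, e.g. entries like `{{:msg, :Foo}, fields}`):
  #
  #     defs =
  #       Protobuf.Parser.parse_string!("foo.proto", """
  #       message Foo {
  #         required uint32 id = 1;
  #       }
  #       """)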
defp finalize!(defs, options) do
case :gpb_parse.post_process_all_files(defs, options) do
{:ok, defs} ->
defs
{:error, error} ->
msg =
case error do
[ref_to_undefined_msg_or_enum: {{root_path, field}, type}] ->
type_ref =
type
|> Enum.map(&Atom.to_string/1)
|> Enum.join()
invalid_ref =
[field | root_path]
|> Enum.reverse()
|> Enum.map(&Atom.to_string/1)
|> Enum.join()
"Reference to undefined message or enum #{type_ref} at #{invalid_ref}"
_ ->
Macro.to_string(error)
end
raise ParserError, message: msg
end
end
defp parse(path, string, options) when is_binary(string) or is_list(string) do
case :gpb_scan.string('#{string}') do
{:ok, tokens, _} ->
lines =
string
|> String.split("\n", parts: :infinity)
|> Enum.count()
case :gpb_parse.parse(tokens ++ [{:"$end", lines + 1}]) do
{:ok, defs} ->
:gpb_parse.post_process_one_file(path, defs, options)
error ->
error
end
error ->
error
end
end
defp parse!(path, string, options) do
case parse(path, string, options) do
{:ok, defs} ->
defs
{:error, error} ->
msg =
case error do
[ref_to_undefined_msg_or_enum: {{root_path, field}, type}] ->
type_ref =
type
|> Enum.map(&Atom.to_string/1)
|> Enum.join()
invalid_ref =
[field | root_path]
|> Enum.reverse()
|> Enum.map(&Atom.to_string/1)
|> Enum.join()
"Reference to undefined message or enum #{type_ref} at #{invalid_ref}"
_ when is_binary(error) ->
error
_ ->
Macro.to_string(error)
end
raise ParserError, message: msg
end
end
end | lib/exprotobuf/parser.ex | 0.592077 | 0.485722 | parser.ex | starcoder |
defmodule HelloOperator.Controller.V1.Greeting do
@moduledoc """
HelloOperator: Greeting CRD.
## Kubernetes CRD Spec
By default all CRD specs are assumed from the module name, you can override them using attributes.
### Examples
```
# Kubernetes API version of this CRD, defaults to value in module name
@version "v2alpha1"
# Kubernetes API group of this CRD, defaults to "hello-operator.example.com"
@group "kewl.example.io"
# The scope of the CRD. Defaults to `:namespaced`
@scope :cluster
# CRD names used by kubectl and the kubernetes API
@names %{
plural: "foos",
singular: "foo",
kind: "Foo"
}
```
## Declare RBAC permissions used by this module
RBAC rules can be declared using `@rule` attribute and generated using `mix bonny.manifest`
This `@rule` attribute is cumulative, and can be declared once for each Kubernetes API Group.
### Examples
```
@rule {apiGroup, resources_list, verbs_list}
@rule {"", ["pods", "secrets"], ["*"]}
@rule {"apiextensions.k8s.io", ["foo"], ["*"]}
```
"""
use Bonny.Controller
@rule {"apps", ["deployments"], ["*"]}
@rule {"", ["services"], ["*"]}
# @group "your-operator.your-domain.com"
# @version "v1"
@scope :namespaced
@names %{
plural: "greetings",
singular: "greeting",
kind: "Greeting"
}
@doc """
Creates a kubernetes `deployment` and `service` that runs a "Hello, World" app.
"""
@spec add(map()) :: :ok | {:error, any()}
def add(payload) do
resources = parse(payload)
conf = Bonny.Config.kubeconfig()
with :ok <- K8s.Client.post(resources.deployment, conf),
:ok <- K8s.Client.post(resources.service, conf) do
:ok
else
{:error, error} -> {:error, error}
end
end
@doc """
Updates `deployment` and `service` resources.
"""
@spec modify(map()) :: :ok | {:error, any()}
def modify(payload) do
resources = parse(payload)
conf = Bonny.Config.kubeconfig()
with :ok <- K8s.Client.patch(resources.deployment, conf),
:ok <- K8s.Client.patch(resources.service, conf) do
:ok
else
{:error, error} -> {:error, error}
end
end
@doc """
Deletes `deployment` and `service` resources.
"""
@spec delete(map()) :: :ok | {:error, any()}
def delete(payload) do
resources = parse(payload)
conf = Bonny.Config.kubeconfig()
with :ok <- K8s.Client.delete(resources.deployment, conf),
:ok <- K8s.Client.delete(resources.service, conf) do
:ok
else
{:error, error} -> {:error, error}
end
end
defp parse(%{"metadata" => %{"name" => name, "namespace" => ns}, "spec" => %{"greeting" => greeting}}) do
deployment = gen_deployment(ns, name, greeting)
service = gen_service(ns, name, greeting)
%{
deployment: deployment,
service: service
}
end
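  # Illustrative payload (shape follows the pattern matched above):
  #
  #     parse(%{
  #       "metadata" => %{"name" => "hello", "namespace" => "default"},
  #       "spec" => %{"greeting" => "Howdy"}
  #     })
  #     #=> %{deployment: %{...}, service: %{...}}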
  defp gen_service(ns, name, _greeting) do
%{
apiVersion: "v1",
kind: "Service",
metadata: %{
name: name,
namespace: ns,
labels: %{app: name}
},
spec: %{
ports: [%{port: 5000, protocol: "TCP"}],
selector: %{app: name},
type: "NodePort"
}
}
end
defp gen_deployment(ns, name, greeting) do
%{
apiVersion: "apps/v1",
kind: "Deployment",
metadata: %{
name: name,
namespace: ns,
labels: %{app: name}
},
spec: %{
replicas: 2,
selector: %{
matchLabels: %{app: name}
},
template: %{
metadata: %{
labels: %{app: name}
},
spec: %{
containers: [
%{
name: name,
image: "quay.io/coryodaniel/greeting-server",
env: [%{name: "GREETING", value: greeting}],
ports: [%{containerPort: 5000}]
}
]
}
}
}
}
end
end | lib/hello_operator/controllers/v1/greeting.ex | 0.88499 | 0.824108 | greeting.ex | starcoder |