Update to latest Nx #311

Merged — 2 commits, Nov 19, 2024
37 changes: 19 additions & 18 deletions lib/scholar/cluster/affinity_propagation.ex
@@ -103,18 +103,18 @@ defmodule Scholar.Cluster.AffinityPropagation do
iex> x = Nx.tensor([[12,5,78,2], [9,3,81,-2], [-1,3,6,1], [1,-2,5,2]])
iex> Scholar.Cluster.AffinityPropagation.fit(x, key: key)
%Scholar.Cluster.AffinityPropagation{
- labels: Nx.tensor([0, 0, 2, 2]),
- cluster_centers_indices: Nx.tensor([0, -1, 2, -1]),
- cluster_centers: Nx.tensor(
+ labels: Nx.s32([0, 0, 2, 2]),
+ cluster_centers_indices: Nx.s32([0, -1, 2, -1]),
+ cluster_centers: Nx.f32(
[
[12.0, 5.0, 78.0, 2.0],
[:infinity, :infinity, :infinity, :infinity],
[-1.0, 3.0, 6.0, 1.0],
[:infinity, :infinity, :infinity, :infinity]
]
),
- num_clusters: Nx.tensor(2, type: :u64),
- iterations: Nx.tensor(22, type: :s64)
+ num_clusters: Nx.u32(2),
+ iterations: Nx.u32(22)
}
"""
deftransform fit(data, opts \\ []) do
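A note for readers scanning the doctest updates in this PR: the new expectations use Nx's type-named constructors (`Nx.s32/1`, `Nx.u32/1`, `Nx.f32/1`, and friends), which are shorthand for `Nx.tensor/2` with an explicit `:type`. A minimal sketch of the equivalence, assuming the Nx version this PR targets:

    iex> Nx.s32([0, 0, 2, 2]) == Nx.tensor([0, 0, 2, 2], type: :s32)
    true
    iex> Nx.u32(2) == Nx.tensor(2, type: :u32)
    true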
@@ -125,7 +125,7 @@ defmodule Scholar.Cluster.AffinityPropagation do

defnp fit_n(data, key, opts) do
data = to_float(data)
- iterations = opts[:iterations]
+ iterations = opts[:iterations] |> Nx.as_type(:u32)
damping_factor = opts[:damping_factor]
converge_after = opts[:converge_after]
n = Nx.axis_size(data, 0)
@@ -146,7 +146,7 @@ defmodule Scholar.Cluster.AffinityPropagation do
stop = Nx.u8(0)

{{a, r, it}, _} =
- while {{a = zero_n, r = zero_n, i = 0}, {s, range, stop, e}},
+ while {{a = zero_n, r = zero_n, i = Nx.u32(0)}, {s, range, stop, e}},
i < iterations and not stop do
temp = a + s
indices = Nx.argmax(temp, axis: 1)
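The counter moves from a bare `0` to `Nx.u32(0)` because `Nx.Defn`'s `while` requires each element of the state tuple to keep a stable type and shape across iterations, so the initial value pins the counter's type up front. A self-contained sketch of the pattern (the module and function names are illustrative, not part of this PR):

    defmodule WhileSketch do
      import Nx.Defn

      defn count_to(limit) do
        # The state tuple's types must be stable: i starts as u32,
        # and i + 1 stays u32 on every iteration.
        {i, _} =
          while {i = Nx.u32(0), limit}, i < limit do
            {i + 1, limit}
          end

        i
      end
    end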
@@ -204,7 +204,7 @@

indices =
Nx.select(mask, Nx.iota(Nx.shape(diagonals)), -1)
- |> Nx.as_type({:s, 64})
+ |> Nx.as_type(:s32)

cluster_centers =
Nx.select(
@@ -216,15 +216,14 @@
labels =
Nx.broadcast(mask, Nx.shape(s))
|> Nx.select(s, Nx.Constants.neg_infinity(Nx.type(s)))
- |> Nx.argmax(axis: 1)
- |> Nx.as_type({:s, 64})
+ |> Nx.argmax(axis: 1, type: :s32)

labels = Nx.select(mask, Nx.iota(Nx.shape(labels)), labels)

{cluster_centers, indices, labels}
else
- {Nx.tensor(-1, type: Nx.type(data)), Nx.broadcast(Nx.tensor(-1, type: :s64), {n}),
-  Nx.broadcast(Nx.tensor(-1, type: :s64), {n})}
+ {Nx.tensor(-1, type: Nx.type(data)), Nx.broadcast(Nx.tensor(-1, type: :s32), {n}),
+  Nx.broadcast(Nx.tensor(-1, type: :s32), {n})}
end

%__MODULE__{
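One detail in the hunk above: instead of computing the argmax and casting afterwards, the cast is folded into the reduction itself, since `Nx.argmax/2` accepts a `:type` option for the index output. For example:

    iex> Nx.argmax(Nx.tensor([[1, 9, 3], [7, 2, 5]]), axis: 1, type: :s32)
    #Nx.Tensor<
    s32[2]
    [1, 0]
    >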
@@ -262,16 +261,16 @@
iex> model = Scholar.Cluster.AffinityPropagation.fit(x, key: key)
iex> Scholar.Cluster.AffinityPropagation.prune(model)
%Scholar.Cluster.AffinityPropagation{
- labels: Nx.tensor([0, 0, 1, 1]),
- cluster_centers_indices: Nx.tensor([0, 2]),
+ labels: Nx.s32([0, 0, 1, 1]),
+ cluster_centers_indices: Nx.s32([0, 2]),
cluster_centers: Nx.tensor(
[
[12.0, 5.0, 78.0, 2.0],
[-1.0, 3.0, 6.0, 1.0]
]
),
- num_clusters: Nx.tensor(2, type: :u64),
- iterations: Nx.tensor(22, type: :s64)
+ num_clusters: Nx.u32(2),
+ iterations: Nx.u32(22)
}
"""
def prune(
@@ -293,7 +292,9 @@
end)

mapping = Map.new(mapping)
- cluster_centers_indices = Nx.tensor(Enum.reverse(indices))
+
+ cluster_centers_indices =
+   Nx.tensor(Enum.reverse(indices), type: Nx.type(cluster_centers_indices))

%__MODULE__{
model
@@ -314,7 +315,7 @@
iex> model = Scholar.Cluster.AffinityPropagation.prune(model)
iex> Scholar.Cluster.AffinityPropagation.predict(model, Nx.tensor([[10,3,50,6], [8,3,8,2]]))
#Nx.Tensor<
- s64[2]
+ s32[2]
[0, 1]
>
"""
12 changes: 6 additions & 6 deletions lib/scholar/cluster/optics.ex
@@ -75,32 +75,32 @@ defmodule Scholar.Cluster.OPTICS do
iex> x = Nx.tensor([[1, 2], [2, 5], [3, 6], [8, 7], [8, 8], [7, 3]])
iex> Scholar.Cluster.OPTICS.fit(x, min_samples: 2).labels
#Nx.Tensor<
- s64[6]
+ s32[6]
[-1, -1, -1, -1, -1, -1]
>
iex> Scholar.Cluster.OPTICS.fit(x, eps: 4.5, min_samples: 2).labels
#Nx.Tensor<
- s64[6]
+ s32[6]
[0, 0, 0, 1, 1, 1]
>
iex> Scholar.Cluster.OPTICS.fit(x, eps: 2, min_samples: 2).labels
#Nx.Tensor<
- s64[6]
+ s32[6]
[-1, 0, 0, 1, 1, -1]
>
iex> Scholar.Cluster.OPTICS.fit(x, eps: 2, min_samples: 2, algorithm: :kd_tree, metric: {:minkowski, 1}).labels
#Nx.Tensor<
- s64[6]
+ s32[6]
[-1, 0, 0, 1, 1, -1]
>
iex> Scholar.Cluster.OPTICS.fit(x, eps: 1, min_samples: 2).labels
#Nx.Tensor<
- s64[6]
+ s32[6]
[-1, -1, -1, 0, 0, -1]
>
iex> Scholar.Cluster.OPTICS.fit(x, eps: 4.5, min_samples: 3).labels
#Nx.Tensor<
- s64[6]
+ s32[6]
[0, 0, 0, 1, 1, -1]
>
"""
19 changes: 9 additions & 10 deletions lib/scholar/linear/isotonic_regression.ex
@@ -158,7 +158,6 @@ defmodule Scholar.Linear.IsotonicRegression do
{sample_weights, opts} = Keyword.pop(opts, :sample_weights, 1.0)
x_type = to_float_type(x)
x = to_float(x)
-
y = to_float(y)

sample_weights =
@@ -202,7 +201,7 @@
@doc """
Makes predictions with the given `model` on input `x` and interpolating `function`.

- Output predictions have shape `{n_samples}` when train target is shaped either `{n_samples}` or `{n_samples, 1}`.
+ Output predictions have shape `{n_samples}` when train target is shaped either `{n_samples}` or `{n_samples, 1}`.
Otherwise, predictions match train target shape.

## Examples
@@ -443,19 +442,19 @@
end

defnp contiguous_isotonic_regression(y, sample_weights, max_size, increasing) do
- y_size = if increasing, do: max_size, else: Nx.axis_size(y, 0) - 1
+ y_size = if(increasing, do: max_size, else: Nx.axis_size(y, 0) - 1) |> Nx.as_type(:u32)
y = if increasing, do: y, else: Nx.reverse(y)
sample_weights = if increasing, do: sample_weights, else: Nx.reverse(sample_weights)

- target = Nx.iota({Nx.axis_size(y, 0)}, type: :s64)
+ target = Nx.iota({Nx.axis_size(y, 0)}, type: :u32)
type_wy = Nx.Type.merge(Nx.type(y), Nx.type(sample_weights))
- i = if increasing, do: 0, else: Nx.axis_size(y, 0) - 1 - max_size
+ i = if(increasing, do: 0, else: Nx.axis_size(y, 0) - 1 - max_size) |> Nx.as_type(:u32)

{{y, target}, _} =
while {{y, target},
{i, sample_weights, sum_w = Nx.tensor(0, type: Nx.type(sample_weights)),
- sum_wy = Nx.tensor(0, type: type_wy), prev_y = Nx.tensor(0, type: type_wy), _k = 0,
- terminating_flag = 0, y_size}},
+ sum_wy = Nx.tensor(0, type: type_wy), prev_y = Nx.tensor(0, type: type_wy),
+ _k = Nx.u32(0), terminating_flag = Nx.u8(0), y_size}},
i < y_size + 1 and not terminating_flag do
k = target[i] + 1

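The same pattern shows up with `Nx.iota/2`, which also takes a `:type` option, so index vectors can be created at the width of the loop counters they are compared against instead of defaulting and casting later:

    iex> Nx.iota({6}, type: :u32)
    #Nx.Tensor<
    u32[6]
    [0, 1, 2, 3, 4, 5]
    >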
@@ -509,12 +508,12 @@
end
end

- i = if increasing, do: 0, else: Nx.axis_size(y, 0) - 1 - max_size
+ i = if(increasing, do: 0, else: Nx.axis_size(y, 0) - 1 - max_size) |> Nx.as_type(:u32)

{y, _} =
- while {y, {target, i, _k = 0, max_size}}, i < max_size + 1 do
+ while {y, {target, i, _k = Nx.u32(0), max_size}}, i < max_size + 1 do
k = target[i] + 1
- indices = Nx.iota({Nx.axis_size(y, 0)})
+ indices = Nx.iota({Nx.axis_size(y, 0)}, type: :u32)
in_range? = Nx.logical_and(i + 1 <= indices, indices < k)
y = Nx.select(in_range?, y[i], y)
i = k
4 changes: 2 additions & 2 deletions lib/scholar/linear/logistic_regression.ex
@@ -211,7 +211,7 @@ defmodule Scholar.Linear.LogisticRegression do
@doc """
Makes predictions with the given `model` on inputs `x`.

- Output predictions have shape `{n_samples}` when train target is shaped either `{n_samples}` or `{n_samples, 1}`.
+ Output predictions have shape `{n_samples}` when train target is shaped either `{n_samples}` or `{n_samples, 1}`.

## Examples

@@ -220,7 +220,7 @@
iex> model = Scholar.Linear.LogisticRegression.fit(x, y, num_classes: 2)
iex> Scholar.Linear.LogisticRegression.predict(model, Nx.tensor([[-3.0, 5.0]]))
#Nx.Tensor<
- s64[1]
+ s32[1]
[1]
>
"""
10 changes: 5 additions & 5 deletions lib/scholar/linear/polynomial_regression.ex
@@ -110,8 +110,8 @@ defmodule Scholar.Linear.PolynomialRegression do
@doc """
Makes predictions with the given `model` on input `x`.

- Output predictions have shape `{n_samples}` when train target is shaped either `{n_samples}` or `{n_samples, 1}`.
- Otherwise, predictions match train target shape.
+ Output predictions have shape `{n_samples}` when train target is shaped either `{n_samples}` or `{n_samples, 1}`.
+ Otherwise, predictions match train target shape.

## Examples

@@ -148,7 +148,7 @@
iex> x = Nx.tensor([[2]])
iex> Scholar.Linear.PolynomialRegression.transform(x, degree: 5, fit_intercept?: false)
#Nx.Tensor<
- s64[1][5]
+ s32[1][5]
[
[2, 4, 8, 16, 32]
]
@@ -157,7 +157,7 @@
iex> x = Nx.tensor([[2, 3]])
iex> Scholar.Linear.PolynomialRegression.transform(x)
#Nx.Tensor<
- s64[1][6]
+ s32[1][6]
[
[1, 2, 3, 4, 6, 9]
]
@@ -166,7 +166,7 @@
iex> x = Nx.iota({3, 2})
iex> Scholar.Linear.PolynomialRegression.transform(x, fit_intercept?: false)
#Nx.Tensor<
- s64[3][5]
+ s32[3][5]
[
[0, 1, 0, 0, 1],
[2, 3, 4, 6, 9],
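The expected dtypes in these `transform/2` doctests flip even though the function body is untouched; presumably this is the newer Nx default integer type at work (integer tensors are created as s32 where older Nx used s64). A quick check under that assumption:

    iex> Nx.type(Nx.tensor([[2, 3]]))
    {:s, 32}
    iex> Nx.type(Nx.iota({3, 2}))
    {:s, 32}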
2 changes: 1 addition & 1 deletion lib/scholar/linear/svm.ex
@@ -259,7 +259,7 @@ defmodule Scholar.Linear.SVM do
iex> model = Scholar.Linear.SVM.fit(x, y, num_classes: 2)
iex> Scholar.Linear.SVM.predict(model, Nx.tensor([[-3.0, 5.0]]))
#Nx.Tensor<
- s64[1]
+ s32[1]
[1]
>
"""
18 changes: 12 additions & 6 deletions lib/scholar/manifold/trimap.ex
@@ -153,14 +153,14 @@ defmodule Scholar.Manifold.Trimap do

# binsearch which checks if the elements of tensor1 are in tensor2
{is_in, _} =
- while {is_in, {tensor1, tensor2, prev = Nx.s64(-1), i = Nx.s64(0)}}, i < Nx.size(tensor1) do
+ while {is_in, {tensor1, tensor2, prev = Nx.s64(-1), i = Nx.u32(0)}}, i < Nx.size(tensor1) do
if i > 0 and prev == tensor1[i] do
is_in = Nx.indexed_put(is_in, Nx.new_axis(i, 0), is_in[i - 1])
{is_in, {tensor1, tensor2, prev, i + 1}}
else
{found?, _} =
while {stop = Nx.u8(0),
- {tensor1, tensor2, left = Nx.s64(0), right = Nx.size(tensor2) - 1, i}},
+ {tensor1, tensor2, left = Nx.s64(0), right = Nx.s64(Nx.size(tensor2) - 1), i}},
left <= right and not stop do
mid = div(left + right, 2)

@@ -188,13 +188,19 @@
final_samples = Nx.broadcast(Nx.s64(0), shape)

{final_samples, key, _, _} =
- while {final_samples, key, rejects, i = Nx.s64(0)}, i < elem(shape, 0) do
- {samples, key} = Nx.Random.randint(key, 0, opts[:maxval], shape: {elem(shape, 1)})
+ while {final_samples, key, rejects, i = Nx.u32(0)}, i < elem(shape, 0) do
+ # TODO: See if we can relax the samples to u32
[Review comment from the PR author on the TODO above]
@msluszniak I kept the type here as s64 because if we change the type, we change the samples, which changes the result of the tests. Can you please tell me if you copied the tests from Python's trimap? Because if not, it is ok for us to change the values here and then update the tests. :)
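The comment above names the key constraint: per the author, changing the requested integer type changes the values `Nx.Random.randint/4` draws from the same key, which would invalidate the hard-coded test fixtures. A hedged way to observe this locally (an illustrative check, not asserting specific outputs):

    key = Nx.Random.key(1234)

    # Same key, range, and shape — only the requested type differs.
    {a, _} = Nx.Random.randint(key, 0, 100, shape: {4}, type: :s64)
    {b, _} = Nx.Random.randint(key, 0, 100, shape: {4}, type: :u32)

    # If this prints 0, the draws diverged, and retyping the samples
    # would ripple into every downstream triplet and test expectation.
    Nx.equal(Nx.as_type(a, :u32), b) |> Nx.all() |> Nx.to_number() |> IO.inspect()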

+ {samples, key} =
+   Nx.Random.randint(key, 0, opts[:maxval], shape: {elem(shape, 1)}, type: :s64)

discard = in1d(samples, rejects[i])

{samples, key, _, _, _} =
while {samples, key, discard, rejects, i}, Nx.any(discard) do
- {new_samples, key} = Nx.Random.randint(key, 0, opts[:maxval], shape: {elem(shape, 1)})
+ # TODO: See if we can relax the samples to u32
+ {new_samples, key} =
+   Nx.Random.randint(key, 0, opts[:maxval], shape: {elem(shape, 1)}, type: :s64)

discard = in1d(new_samples, rejects[i]) or in1d(new_samples, samples)
samples = Nx.select(discard, samples, new_samples)
{samples, key, in1d(samples, rejects[i]), rejects, i}
@@ -552,7 +558,7 @@
gain = Nx.broadcast(Nx.tensor(1.0, type: to_float_type(embeddings)), Nx.shape(embeddings))

{embeddings, _} =
- while {embeddings, {vel, gain, lr, triplets, weights, i = Nx.s64(0)}},
+ while {embeddings, {vel, gain, lr, triplets, weights, i = Nx.u32(0)}},
i < opts[:num_iters] do
gamma = if i < @switch_iter, do: @init_momentum, else: @final_momentum
