Merge pull request #112 from maciejkula/pytorch-v0.4.0
Upgrade to PyTorch v0.4.0
maciejkula authored May 20, 2018
2 parents 5470ae5 + 2cd02d2 commit c3bae11
Showing 15 changed files with 51 additions and 60 deletions.
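For context: PyTorch v0.4.0 merged `Variable` into `Tensor`, which drives most of the edits below — `Variable(...)` wrappers are dropped, `loss.data[0]` becomes `loss.item()`, and tensors are detached before conversion to NumPy. A minimal sketch of the migration pattern (illustrative names only, not code from this repository):

```python
import torch

# PyTorch 0.4: plain tensors carry autograd state, so the old
# torch.autograd.Variable wrapper is redundant.
weights = torch.randn(3, requires_grad=True)
inputs = torch.randn(3)

loss = ((weights * inputs).sum() - 1.0) ** 2
loss.backward()

# 0.3.1: epoch_loss += loss.data[0]  (indexing a 0-dim tensor now raises an error)
epoch_loss = loss.item()  # 0.4.0: extract the Python scalar

# 0.3.1: cpu(out.data).numpy()
# 0.4.0: detach from the graph before converting to NumPy
values = (weights * inputs).detach().numpy()
```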
2 changes: 1 addition & 1 deletion .travis/install.sh
@@ -22,7 +22,7 @@ source activate conda_env
echo "Installing dependencies."
conda install -y -q numpy scipy requests h5py scikit-learn pytest flake8
echo "Installing PyTorch."
-conda install -y -q -c pytorch pytorch-cpu=0.3.1
+conda install -y -q -c pytorch pytorch=0.4.0

# Pushing docs
echo "Installing Sphinx."
4 changes: 2 additions & 2 deletions appveyor.yml
@@ -2,7 +2,7 @@ build: false

environment:
global:
-REQUIREMENTS: "numpy scipy requests h5py scikit-learn pytest flake8 pytorch-cpu=0.3.1"
+REQUIREMENTS: "numpy scipy requests h5py scikit-learn pytest flake8 pytorch-cpu=0.4.0"

matrix:
- PYTHON_VERSION: 3.6
@@ -15,7 +15,7 @@ init:
install:
- "set PATH=%MINICONDA%;%MINICONDA%\\Scripts;%PATH%"
- conda config --set always_yes yes --set changeps1 no
-- conda create -n spotlight-env -c peterjc123 %REQUIREMENTS% python=%PYTHON_VERSION%
+- conda create -n spotlight-env -c pytorch %REQUIREMENTS% python=%PYTHON_VERSION%
- activate spotlight-env
- pip install torchvision
- pip install -e .
8 changes: 8 additions & 0 deletions docs/changelog.rst
@@ -2,6 +2,14 @@
Changelog
=========

+v0.1.5 (2018-05-20)
+-------------------
+
+Other
+~~~~~
+
+* Migration to PyTorch v0.4.0.
+
v0.1.4 (2018-02-18)
-------------------

2 changes: 1 addition & 1 deletion docs/readme.rst
@@ -38,7 +38,7 @@ Installation

.. code-block:: python
-conda install -c maciejkula -c pytorch -c peterjc123 spotlight=0.1.4
+conda install -c maciejkula -c pytorch spotlight=0.1.5
Usage
2 changes: 1 addition & 1 deletion readme.rst
@@ -38,7 +38,7 @@ Installation

.. code-block:: python
-conda install -c maciejkula -c pytorch -c peterjc123 spotlight=0.1.4
+conda install -c maciejkula -c pytorch spotlight=0.1.5
Usage
1 change: 1 addition & 0 deletions setup.py
@@ -10,6 +10,7 @@
name='spotlight',
version=version,
packages=find_packages(),
+install_requires=['torch>=0.4.0'],
license='MIT',
classifiers=['Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
2 changes: 1 addition & 1 deletion spotlight/__init__.py
@@ -1 +1 @@
-__version__ = 'v0.1.4'
+__version__ = 'v0.1.5'
5 changes: 2 additions & 3 deletions spotlight/factorization/_components.py
@@ -1,7 +1,6 @@
import numpy as np

import torch
-from torch.autograd import Variable

from spotlight.torch_utils import gpu

@@ -20,7 +19,7 @@ def _predict_process_ids(user_ids, item_ids, num_items, use_cuda):
if item_ids.size()[0] != user_ids.size(0):
user_ids = user_ids.expand(item_ids.size())

-user_var = Variable(gpu(user_ids, use_cuda))
-item_var = Variable(gpu(item_ids, use_cuda))
+user_var = gpu(user_ids, use_cuda)
+item_var = gpu(item_ids, use_cuda)

return user_var.squeeze(), item_var.squeeze()
14 changes: 4 additions & 10 deletions spotlight/factorization/explicit.py
@@ -8,8 +8,6 @@

import torch.optim as optim

-from torch.autograd import Variable

from spotlight.helpers import _repr_model
from spotlight.factorization._components import _predict_process_ids
from spotlight.factorization.representations import BilinearNet
@@ -222,19 +220,15 @@ def fit(self, interactions, verbose=False):
ratings_tensor,
batch_size=self._batch_size)):

-user_var = Variable(batch_user)
-item_var = Variable(batch_item)
-ratings_var = Variable(batch_ratings)
-
-predictions = self._net(user_var, item_var)
+predictions = self._net(batch_user, batch_item)

if self._loss == 'poisson':
predictions = torch.exp(predictions)

self._optimizer.zero_grad()

-loss = self._loss_func(ratings_var, predictions)
-epoch_loss += loss.data[0]
+loss = self._loss_func(batch_ratings, predictions)
+epoch_loss += loss.item()

loss.backward()
self._optimizer.step()
@@ -287,4 +281,4 @@ def predict(self, user_ids, item_ids=None):
elif self._loss == 'logistic':
out = torch.sigmoid(out)

-return cpu(out.data).numpy().flatten()
+return cpu(out).detach().numpy().flatten()
23 changes: 9 additions & 14 deletions spotlight/factorization/implicit.py
@@ -8,8 +8,6 @@

import torch.optim as optim

-from torch.autograd import Variable

from spotlight.helpers import _repr_model
from spotlight.factorization._components import _predict_process_ids
from spotlight.losses import (adaptive_hinge_loss,
@@ -228,20 +226,18 @@ def fit(self, interactions, verbose=False):
item_ids_tensor,
batch_size=self._batch_size)):

-user_var = Variable(batch_user)
-item_var = Variable(batch_item)
-positive_prediction = self._net(user_var, item_var)
+positive_prediction = self._net(batch_user, batch_item)

if self._loss == 'adaptive_hinge':
negative_prediction = self._get_multiple_negative_predictions(
-user_var, n=self._num_negative_samples)
+batch_user, n=self._num_negative_samples)
else:
-negative_prediction = self._get_negative_prediction(user_var)
+negative_prediction = self._get_negative_prediction(batch_user)

self._optimizer.zero_grad()

loss = self._loss_func(positive_prediction, negative_prediction)
-epoch_loss += loss.data[0]
+epoch_loss += loss.item()

loss.backward()
self._optimizer.step()
@@ -261,9 +257,8 @@ def _get_negative_prediction(self, user_ids):
self._num_items,
len(user_ids),
random_state=self._random_state)
-negative_var = Variable(
-gpu(torch.from_numpy(negative_items), self._use_cuda)
-)
+negative_var = gpu(torch.from_numpy(negative_items), self._use_cuda)

negative_prediction = self._net(user_ids, negative_var)

return negative_prediction
@@ -273,9 +268,9 @@ def _get_multiple_negative_predictions(self, user_ids, n=5):
batch_size = user_ids.size(0)

negative_prediction = self._get_negative_prediction(user_ids
-.resize(batch_size, 1)
+.view(batch_size, 1)
.expand(batch_size, n)
-.resize(batch_size * n))
+.reshape(batch_size * n))

return negative_prediction.view(n, len(user_ids))
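The `.resize()` → `.view()`/`.reshape()` switch in the hunk above is more than renaming: `.expand()` returns a non-contiguous tensor, `.view()` requires contiguous memory, and `.reshape()` copies when a view is impossible. A standalone sketch of the same tiling pattern (toy sizes, not repository code):

```python
import torch

user_ids = torch.arange(4)  # batch_size = 4
n = 3                       # negative samples per user

tiled = (user_ids
         .view(4, 1)        # fine: the input is contiguous
         .expand(4, n)      # repeats rows without copying -> non-contiguous
         .reshape(4 * n))   # copies as needed; .view() would raise here

print(tiled)  # tensor([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3])
```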

@@ -313,4 +308,4 @@ def predict(self, user_ids, item_ids=None):

out = self._net(user_ids, item_ids)

-return cpu(out.data).numpy().flatten()
+return cpu(out).detach().numpy().flatten()
12 changes: 5 additions & 7 deletions spotlight/layers.py
@@ -9,8 +9,6 @@
import torch
import torch.nn as nn

-from torch.autograd import Variable


SEEDS = [
179424941, 179425457, 179425907, 179426369,
@@ -226,18 +224,18 @@ def forward(self, indices):
if (self._offsets is None or
self._offsets.size(0) != (batch_size * seq_size)):

-self._offsets = Variable(torch.arange(0,
-indices.numel(),
-indices.size(1)).long())
+self._offsets = torch.arange(0,
+indices.numel(),
+indices.size(1)).long()

if indices.is_cuda:
self._offsets = self._offsets.cuda()

-hashed_indices = Variable(self._get_hashed_indices(indices))
+hashed_indices = self._get_hashed_indices(indices)
embedding = self.embeddings(hashed_indices.view(-1), self._offsets)
embedding = embedding.view(batch_size, seq_size, -1)
else:
-hashed_indices = Variable(self._get_hashed_indices(indices))
+hashed_indices = self._get_hashed_indices(indices)

embedding = self.embeddings(hashed_indices)
embedding = embedding.sum(1)
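For reference, the `offsets` tensor built with `torch.arange` above follows the standard `nn.EmbeddingBag` pattern: it marks where each bag starts once the 2-d index matrix is flattened. A minimal sketch with toy sizes (plain indices rather than the Bloom-hashed ones this layer actually feeds in):

```python
import torch
import torch.nn as nn

batch_size, seq_size, dim = 2, 3, 4
bag = nn.EmbeddingBag(10, dim, mode='sum')

indices = torch.randint(0, 10, (batch_size, seq_size)).long()
# One bag per row: bag i starts at position i * seq_size in the
# flattened index vector.
offsets = torch.arange(0, indices.numel(), indices.size(1)).long()

pooled = bag(indices.view(-1), offsets)  # shape: (batch_size, dim)
```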
17 changes: 7 additions & 10 deletions spotlight/sequence/implicit.py
@@ -9,8 +9,6 @@

import torch.optim as optim

-from torch.autograd import Variable

from spotlight.helpers import _repr_model
from spotlight.losses import (adaptive_hinge_loss,
bpr_loss,
@@ -227,7 +225,7 @@ def fit(self, interactions, verbose=False):
for minibatch_num, batch_sequence in enumerate(minibatch(sequences_tensor,
batch_size=self._batch_size)):

-sequence_var = Variable(batch_sequence)
+sequence_var = batch_sequence

user_representation, _ = self._net.user_representation(
sequence_var
@@ -250,7 +248,7 @@ def fit(self, interactions, verbose=False):
loss = self._loss_func(positive_prediction,
negative_prediction,
mask=(sequence_var != PADDING_IDX))
-epoch_loss += loss.data[0]
+epoch_loss += loss.item()

loss.backward()

@@ -271,9 +269,8 @@ def _get_negative_prediction(self, shape, user_representation):
self._num_items,
shape,
random_state=self._random_state)
-negative_var = Variable(
-gpu(torch.from_numpy(negative_items), self._use_cuda)
-)
+negative_var = gpu(torch.from_numpy(negative_items), self._use_cuda)

negative_prediction = self._net(user_representation, negative_var)

return negative_prediction
@@ -323,12 +320,12 @@ def predict(self, sequences, item_ids=None):
sequences = torch.from_numpy(sequences.astype(np.int64).reshape(1, -1))
item_ids = torch.from_numpy(item_ids.astype(np.int64))

-sequence_var = Variable(gpu(sequences, self._use_cuda))
-item_var = Variable(gpu(item_ids, self._use_cuda))
+sequence_var = gpu(sequences, self._use_cuda)
+item_var = gpu(item_ids, self._use_cuda)

_, sequence_representations = self._net.user_representation(sequence_var)
size = (len(item_var),) + sequence_representations.size()[1:]
out = self._net(sequence_representations.expand(*size),
item_var)

-return cpu(out.data).numpy().flatten()
+return cpu(out).detach().numpy().flatten()
8 changes: 4 additions & 4 deletions spotlight/sequence/representations.py
@@ -547,10 +547,10 @@ def user_representation(self, item_sequences):
user_representations, _ = self.lstm(sequence_embeddings)
user_representations = user_representations.permute(0, 2, 1)
user_representations = self.projection(user_representations)
-user_representations = user_representations.resize(batch_size,
-self.num_mixtures * 2,
-self.embedding_dim,
-sequence_length + 1)
+user_representations = user_representations.view(batch_size,
+self.num_mixtures * 2,
+self.embedding_dim,
+sequence_length + 1)

return user_representations[:, :, :, :-1], user_representations[:, :, :, -1:]

2 changes: 1 addition & 1 deletion tests/sequence/test_sequence_implicit.py
@@ -88,7 +88,7 @@ def test_implicit_pooling_synthetic(randomness, expected_mrr):


@pytest.mark.parametrize('randomness, expected_mrr', [
-(1e-3, 0.65),
+(1e-3, 0.61),
(1e2, 0.03),
])
def test_implicit_lstm_synthetic(randomness, expected_mrr):
9 changes: 4 additions & 5 deletions tests/test_layers.py
@@ -3,7 +3,6 @@
import torch

import torch.nn as nn
-from torch.autograd import Variable

from spotlight.layers import BloomEmbedding, ScaledEmbedding

@@ -25,15 +24,15 @@ def test_embeddings(embedding_class):
embedding_dim)

# Test 1-d inputs (minibatch)
-indices = Variable(torch.from_numpy(
-np.random.randint(0, num_embeddings, size=batch_size, dtype=np.int64)))
+indices = torch.from_numpy(
+np.random.randint(0, num_embeddings, size=batch_size, dtype=np.int64))
representation = layer(indices)
assert representation.size()[0] == batch_size
assert representation.size()[-1] == embedding_dim

# Test 2-d inputs (minibatch x sequence_length)
-indices = Variable(torch.from_numpy(
+indices = torch.from_numpy(
np.random.randint(0, num_embeddings,
-size=(batch_size, sequence_length), dtype=np.int64)))
+size=(batch_size, sequence_length), dtype=np.int64))
representation = layer(indices)
assert representation.size() == (batch_size, sequence_length, embedding_dim)
