Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

First step in converting to Python 3.5 by running `2to3-3.5 -p -n -w ./moe`. #459

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 3 additions & 5 deletions moe/bandit/bandit_interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
from abc import ABCMeta, abstractmethod


class BanditInterface(object):
class BanditInterface(object, metaclass=ABCMeta):

r"""Interface for a bandit algorithm.

Expand All @@ -16,8 +16,6 @@ class BanditInterface(object):

"""

__metaclass__ = ABCMeta

@abstractmethod
def allocate_arms(self):
        r"""Compute the allocation to each arm given ``historical_info``, running bandit ``subtype`` endpoint with hyperparameters in ``hyperparameter_info``.
Expand Down Expand Up @@ -46,9 +44,9 @@ def choose_arm(arms_to_allocations):
if not arms_to_allocations:
raise ValueError('arms_to_allocations is empty!')

allocations = numpy.array(arms_to_allocations.values())
allocations = numpy.array(list(arms_to_allocations.values()))
# The winning arm is chosen based on the distribution of arm allocations.
winner = numpy.argmax(numpy.random.dirichlet(allocations))
# While the internal order of a dict is unknowable a priori, the order presented by the various iterators
# and list-ify methods is always the same as long as the dict is not modified between calls to these methods.
return arms_to_allocations.keys()[winner]
return list(arms_to_allocations.keys())[winner]
4 changes: 2 additions & 2 deletions moe/bandit/bla/bla.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ def __init__(
self._historical_info = copy.deepcopy(historical_info)
self._subtype = subtype
# Validate that every arm is a Bernoulli arm.
for arm in self._historical_info.arms_sampled.itervalues():
for arm in self._historical_info.arms_sampled.values():
if not isinstance(arm, BernoulliArm):
raise ValueError('All arms have to be Bernoulli arms!')

Expand Down Expand Up @@ -122,5 +122,5 @@ def get_winning_arm_names(self, arms_sampled):
if not arms_sampled:
raise ValueError('arms_sampled is empty!')

bla_payoff_arm_name_list = [(self.get_bla_payoff(sampled_arm), arm_name) for arm_name, sampled_arm in arms_sampled.iteritems()]
bla_payoff_arm_name_list = [(self.get_bla_payoff(sampled_arm), arm_name) for arm_name, sampled_arm in arms_sampled.items()]
return get_winning_arm_names_from_payoff_arm_name_list(bla_payoff_arm_name_list)
6 changes: 3 additions & 3 deletions moe/bandit/data_containers.py
Original file line number Diff line number Diff line change
Expand Up @@ -202,7 +202,7 @@ def __str__(self, pretty_print=True):
def json_payload(self):
"""Construct a json serializeable and MOE REST recognizeable dictionary of the historical data."""
json_arms_sampled = {}
for name, arm in self._arms_sampled.iteritems():
for name, arm in self._arms_sampled.items():
json_arms_sampled[name] = arm.json_payload()
return {'arms_sampled': json_arms_sampled}

Expand All @@ -217,7 +217,7 @@ def validate_sample_arms(sample_arms):

"""
if sample_arms:
for arm in sample_arms.itervalues():
for arm in sample_arms.values():
arm.validate()

def append_sample_arms(self, sample_arms, validate=True):
Expand Down Expand Up @@ -246,7 +246,7 @@ def _update_historical_data(self, sample_arms):
:param sample_arms: the already-sampled arms: wins, losses, and totals
:type sample_arms: dictionary of (arm name, SampleArm) key-value pairs
"""
for name, arm in sample_arms.iteritems():
for name, arm in sample_arms.items():
if name in self._arms_sampled:
self._arms_sampled[name] += arm
else:
Expand Down
2 changes: 1 addition & 1 deletion moe/bandit/epsilon/epsilon_first.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ def allocate_arms(self):
if not arms_sampled:
raise ValueError('sample_arms is empty!')

num_sampled = sum([sampled_arm.total for sampled_arm in arms_sampled.itervalues()])
num_sampled = sum([sampled_arm.total for sampled_arm in arms_sampled.values()])
# Exploration phase, trials 1,2,..., epsilon * T
# Allocate equal probability to all arms
if num_sampled < self._total_samples * self._epsilon:
Expand Down
2 changes: 1 addition & 1 deletion moe/bandit/epsilon/epsilon_greedy.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ def allocate_arms(self):
arms_to_allocations = {}

# With probability epsilon, choose a winning arm at random. Therefore, we split the allocation epsilon among all arms.
for arm_name in arms_sampled.iterkeys():
for arm_name in arms_sampled.keys():
arms_to_allocations[arm_name] = epsilon_allocation

# With probability 1-epsilon, split allocation among winning arms.
Expand Down
2 changes: 1 addition & 1 deletion moe/bandit/epsilon/epsilon_interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ def get_winning_arm_names(arms_sampled):
raise ValueError('arms_sampled is empty!')

avg_payoff_arm_name_list = []
for arm_name, sampled_arm in arms_sampled.iteritems():
for arm_name, sampled_arm in arms_sampled.items():
avg_payoff = numpy.float64(sampled_arm.win - sampled_arm.loss) / sampled_arm.total if sampled_arm.total > 0 else 0
avg_payoff_arm_name_list.append((avg_payoff, arm_name))

Expand Down
6 changes: 3 additions & 3 deletions moe/bandit/ucb/ucb_interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ def get_unsampled_arm_names(arms_sampled):
if not arms_sampled:
raise ValueError('arms_sampled is empty!')

unsampled_arm_name_list = [name for name, sampled_arm in arms_sampled.iteritems() if sampled_arm.total == 0]
unsampled_arm_name_list = [name for name, sampled_arm in arms_sampled.items() if sampled_arm.total == 0]
return frozenset(unsampled_arm_name_list)

@abstractmethod
Expand Down Expand Up @@ -131,8 +131,8 @@ def get_winning_arm_names(self, arms_sampled):
if unsampled_arm_names:
return unsampled_arm_names

number_sampled = sum([sampled_arm.total for sampled_arm in arms_sampled.itervalues()])
number_sampled = sum([sampled_arm.total for sampled_arm in arms_sampled.values()])

ucb_payoff_arm_name_list = [(self.get_ucb_payoff(sampled_arm, number_sampled), arm_name) for arm_name, sampled_arm in arms_sampled.iteritems()]
ucb_payoff_arm_name_list = [(self.get_ucb_payoff(sampled_arm, number_sampled), arm_name) for arm_name, sampled_arm in arms_sampled.items()]

return get_winning_arm_names_from_payoff_arm_name_list(ucb_payoff_arm_name_list)
8 changes: 4 additions & 4 deletions moe/bandit/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,9 +20,9 @@ def get_winning_arm_names_from_payoff_arm_name_list(payoff_arm_name_list):
best_payoff, _ = max(payoff_arm_name_list)

# Filter out arms that have payoff less than the best payoff
winning_arm_payoff_name_list = filter(lambda payoff_arm_name: payoff_arm_name[0] == best_payoff, payoff_arm_name_list)
winning_arm_payoff_name_list = [payoff_arm_name for payoff_arm_name in payoff_arm_name_list if payoff_arm_name[0] == best_payoff]
# Extract a list of winning arm names from a list of (payoff, arm name) tuples.
_, winning_arm_name_list = map(list, zip(*winning_arm_payoff_name_list))
_, winning_arm_name_list = list(map(list, list(zip(*winning_arm_payoff_name_list))))
winning_arm_names = frozenset(winning_arm_name_list)
return winning_arm_names

Expand All @@ -46,14 +46,14 @@ def get_equal_arm_allocations(arms_sampled, winning_arm_names=None):

# If no ``winning_arm_names`` given, split allocations among ``arms_sampled``.
if winning_arm_names is None:
winning_arm_names = frozenset([arm_name for arm_name in arms_sampled.iterkeys()])
winning_arm_names = frozenset([arm_name for arm_name in arms_sampled.keys()])

num_winning_arms = len(winning_arm_names)
arms_to_allocations = {}

winning_arm_allocation = 1.0 / num_winning_arms
# Split allocation among winning arms, all other arms get allocation of 0.
for arm_name in arms_sampled.iterkeys():
for arm_name in arms_sampled.keys():
arms_to_allocations[arm_name] = winning_arm_allocation if arm_name in winning_arm_names else 0.0

return arms_to_allocations
6 changes: 3 additions & 3 deletions moe/easy_interface/simple_endpoint.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""Simple functions for hitting the REST endpoints of a MOE service."""
import contextlib
import urllib2
import urllib.request, urllib.error, urllib.parse

import simplejson as json

Expand All @@ -20,8 +20,8 @@ def call_endpoint_with_payload(rest_host, rest_port, endpoint, json_payload, tes
"""Send a POST request to a ``url`` with a given ``json_payload``, return the response as a dict."""
if testapp is None:
url = "http://{0}:{1:d}{2}".format(rest_host, rest_port, endpoint)
request = urllib2.Request(url, json_payload, {'Content-Type': 'application/json'})
with contextlib.closing(urllib2.urlopen(request)) as f:
request = urllib.request.Request(url, json_payload, {'Content-Type': 'application/json'})
with contextlib.closing(urllib.request.urlopen(request)) as f:
response = f.read()
else:
response = testapp.post(endpoint, json_payload).body
Expand Down
2 changes: 1 addition & 1 deletion moe/optimal_learning/python/comparison.py
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ def __repr__(self):
"""
return '{0:s}({1:s})'.format(
self.__class__.__name__,
', '.join(map(lambda pair: '{0}={1}'.format(pair[0], pair[1]), self._get_comparable_members())),
', '.join(['{0}={1}'.format(pair[0], pair[1]) for pair in self._get_comparable_members()]),
)

def __eq__(self, other):
Expand Down
4 changes: 2 additions & 2 deletions moe/optimal_learning/python/data_containers.py
Original file line number Diff line number Diff line change
Expand Up @@ -199,7 +199,7 @@ def validate_historical_data(dim, points_sampled, points_sampled_value, points_s
raise ValueError('Input arrays do not have the same leading dimension: (points_sampled, value, noise) = ({0:d}, {1:d}, {2:d})'.format(points_sampled.shape[0], points_sampled_value.size, points_sampled_noise_variance.size))

if points_sampled.shape[0] > 0:
for i in xrange(points_sampled.shape[0]):
for i in range(points_sampled.shape[0]):
temp = SamplePoint(points_sampled[i], points_sampled_value[i], points_sampled_noise_variance[i])
temp.validate(dim=dim)

Expand Down Expand Up @@ -262,7 +262,7 @@ def to_list_of_sample_points(self):

"""
return [SamplePoint(numpy.copy(self._points_sampled[i]), self._points_sampled_value[i], noise_variance=self._points_sampled_noise_variance[i])
for i in xrange(self.num_sampled)]
for i in range(self.num_sampled)]

def _update_historical_data(self, offset, sample_points):
"""Copy (in "transposed" order) data from ``sample_points`` into this object's data members, starting at index ``offset``.
Expand Down
4 changes: 2 additions & 2 deletions moe/optimal_learning/python/geometry_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ def generate_latin_hypercube_points(num_points, domain_bounds):
ordering = numpy.arange(num_points)
numpy.random.shuffle(ordering)

for j in xrange(num_points):
for j in range(num_points):
point_base = interval.min + subcube_edge_length * ordering[j]
points[j, i] = point_base + numpy.random.uniform(0.0, subcube_edge_length)

Expand Down Expand Up @@ -82,7 +82,7 @@ def generate_grid_points(points_per_dimension, domain_bounds):
# vstack stacks inputs vertically: so for our 1D arrays, the i-th input becomes
# the i-th row in a matrix. And since each mesh_grid output has *every* coordinate
# of the grid in that dimension, the *columns* of the stack contain every grid point.
return numpy.vstack(map(numpy.ravel, mesh_grid)).T
return numpy.vstack(list(map(numpy.ravel, mesh_grid))).T


# See ClosedInterval (below) for docstring.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@
from abc import ABCMeta, abstractmethod, abstractproperty


class CovarianceInterface(object):
class CovarianceInterface(object, metaclass=ABCMeta):

r"""Interface for a covariance function: covariance of two points and spatial/hyperparameter derivatives.

Expand All @@ -48,8 +48,6 @@ class CovarianceInterface(object):

"""

__metaclass__ = ABCMeta

@abstractproperty
def num_hyperparameters(self):
"""Return the number of hyperparameters of this covariance function."""
Expand Down
4 changes: 1 addition & 3 deletions moe/optimal_learning/python/interfaces/domain_interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,10 @@
from abc import ABCMeta, abstractmethod, abstractproperty


class DomainInterface(object):
class DomainInterface(object, metaclass=ABCMeta):

"""Interface for a domain: in/out test, random point generation, and update limiting (for constrained optimization)."""

__metaclass__ = ABCMeta

@abstractproperty
def dim(self):
"""Return the number of spatial dimensions."""
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@
from abc import ABCMeta, abstractmethod, abstractproperty


class ExpectedImprovementInterface(object):
class ExpectedImprovementInterface(object, metaclass=ABCMeta):

r"""Interface for Expected Improvement computation: EI and its gradient at specified point(s) sampled from a GaussianProcess.

Expand All @@ -50,8 +50,6 @@ class ExpectedImprovementInterface(object):

"""

__metaclass__ = ABCMeta

@abstractproperty
def dim(self):
"""Return the number of spatial dimensions."""
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
from abc import ABCMeta, abstractmethod, abstractproperty


class GaussianProcessDataInterface(object):
class GaussianProcessDataInterface(object, metaclass=ABCMeta):

"""Core data interface for constructing or manipulating a Gaussian Process Prior (GPP).

Expand All @@ -34,8 +34,6 @@ class GaussianProcessDataInterface(object):

"""

__metaclass__ = ABCMeta

@abstractmethod
def get_covariance_copy(self):
"""Return a copy of the covariance object specifying the Gaussian Process.
Expand All @@ -61,7 +59,7 @@ def get_core_data_copy(self):
return self.get_covariance_copy(), self.get_historical_data_copy()


class GaussianProcessInterface(GaussianProcessDataInterface):
class GaussianProcessInterface(GaussianProcessDataInterface, metaclass=ABCMeta):

r"""Interface for a GaussianProcess: mean, variance, gradients thereof, and data I/O.

Expand Down Expand Up @@ -101,8 +99,6 @@ class GaussianProcessInterface(GaussianProcessDataInterface):

"""

__metaclass__ = ABCMeta

@staticmethod
def _clamp_num_derivatives(num_points, num_derivatives):
"""Clamp num_derivatives so that the result is 0 <= result <= num_points; negative num_derivatives yields num_points.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@
from moe.optimal_learning.python.interfaces.gaussian_process_interface import GaussianProcessDataInterface


class GaussianProcessLogLikelihoodInterface(GaussianProcessDataInterface):
class GaussianProcessLogLikelihoodInterface(GaussianProcessDataInterface, metaclass=ABCMeta):

r"""Interface for computation of log likelihood (and log likelihood-like) measures of model fit along with its gradient and hessian.

Expand All @@ -103,8 +103,6 @@ class GaussianProcessLogLikelihoodInterface(GaussianProcessDataInterface):

"""

__metaclass__ = ABCMeta

@abstractproperty
def dim(self):
"""Return the number of spatial dimensions."""
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
from abc import ABCMeta, abstractmethod, abstractproperty


class OptimizableInterface(object):
class OptimizableInterface(object, metaclass=ABCMeta):

r"""Interface that an object must fulfill to be optimized by an implementation of OptimizerInterface.

Expand All @@ -37,8 +37,6 @@ class OptimizableInterface(object):

"""

__metaclass__ = ABCMeta

@abstractproperty
def problem_size(self):
"""Return the number of independent parameters to optimize."""
Expand Down Expand Up @@ -93,7 +91,7 @@ def compute_hessian_objective_function(self, **kwargs):
pass


class OptimizerInterface(object):
class OptimizerInterface(object, metaclass=ABCMeta):

r"""Interface to *maximize* any object implementing OptimizableInterface (defined above).

Expand All @@ -106,8 +104,6 @@ class OptimizerInterface(object):

"""

__metaclass__ = ABCMeta

@abstractmethod
def optimize(self, **kwargs):
r"""Maximize a function f(x), represented by an implementation of OptimizableInterface.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -364,7 +364,7 @@ def multivar_norm_cdf(upper, cov_matrix):
sum_term += cov_k[0, k] * singlevar_norm_pdf(m_k[0], cov_k[0, 0], b_k[0])
else:
for i in range(0, num_points):
index_no_i = range(0, i) + range(i + 1, num_points)
index_no_i = list(range(0, i)) + list(range(i + 1, num_points))

# c_k introduced on top of page 4
c_k = (b_k - m_k) - (b_k[i] - m_k[i]) * cov_k[i, :] / cov_k[i, i]
Expand Down Expand Up @@ -595,7 +595,7 @@ def _compute_grad_expected_improvement_monte_carlo_naive(self, mu_star, var_star
winner = numpy.argmax(improvements_this_iter)
if winner < self.num_to_sample:
aggregate_dx[winner, ...] -= grad_mu[winner, ...]
for diff_index in xrange(self.num_to_sample):
for diff_index in range(self.num_to_sample):
# grad_chol_decomp_{diff_index, winner, i, j} * normal_draws_{i}
aggregate_dx[diff_index, ...] -= numpy.dot(grad_chol_decomp[diff_index, winner, ...].T, normal_draws)

Expand Down Expand Up @@ -700,9 +700,9 @@ def _compute_grad_expected_improvement_monte_carlo(self, mu_star, var_star, grad
# will never be very large.
# This tradeoff may change when GH-60 is done.
grad_chol_decomp_tiled = numpy.empty((normals_compressed.shape[0], grad_chol_decomp.shape[2], grad_chol_decomp.shape[3]))
for diff_index in xrange(self.num_to_sample):
for diff_index in range(self.num_to_sample):
grad_chol_decomp_tiled[...] = 0.0
for i in xrange(num_points):
for i in range(num_points):
# Only track the iterations where point i had the best improvement (winner)
winner_indexes_equal_to_i = winner_indexes_tiled_equal_to_diff_index[i, ...]

Expand Down
Loading