diff --git a/examples/2D_leave_n_out.py b/examples/2D_leave_n_out.py
index 565c1ab..500725a 100644
--- a/examples/2D_leave_n_out.py
+++ b/examples/2D_leave_n_out.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 __author__ = 'cpaulson'
 import pyKriging
 from pyKriging.krige import kriging
@@ -15,7 +16,7 @@
 # We generate our observed values based on our sampling plan and the test function
 y = testfun(X)

-print 'Setting up the Kriging Model'
+print('Setting up the Kriging Model')
 cvMSE = []
 # Now that we have our initial data, we can create an instance of a kriging model
 k = kriging(X, y, testfunction=testfun, name='simple', testPoints=300)
@@ -26,7 +27,7 @@
 k.plot()

 for i in range(15):
-    print i
+    print(i)
     newpoints = k.infill(1)
     for point in newpoints:
         # print 'Adding point {}'.format(point)
@@ -43,15 +44,15 @@

 #
 #And plot the model
-print 'Now plotting final results...'
+print('Now plotting final results...')
 # k.plot()
-print k.testPoints
-print k.history['points']
-print k.history['rsquared']
-print k.history['avgMSE']
-print cvMSE
+print(k.testPoints)
+print(k.history['points'])
+print(k.history['rsquared'])
+print(k.history['avgMSE'])
+print(cvMSE)

 from matplotlib import pylab as plt
 plt.plot(range(len(k.history['rsquared'])), k.history['rsquared'])
 plt.plot(range(len(cvMSE)), cvMSE)
diff --git a/examples/2D_model_convergence.py b/examples/2D_model_convergence.py
index bebad2c..ffb7fed 100644
--- a/examples/2D_model_convergence.py
+++ b/examples/2D_model_convergence.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 __author__ = 'cpaulson'
 import pyKriging
 from pyKriging.krige import kriging
@@ -13,7 +14,7 @@
 # We generate our observed values based on our sampling plan and the test function
 y = testfun(X)

-print 'Setting up the Kriging Model'
+print('Setting up the Kriging Model')
 # Now that we have our initial data, we can create an instance of a kriging model
 k = kriging(X, y, testfunction=testfun, name='simple', testPoints=250)

@@ -25,17 +26,17 @@

 while k.history['rsquared'][-1] < 0.9999:
     newpoints = k.infill(2)
     for point in newpoints:
-        print 'Adding point {}'.format(point)
+        print('Adding point {}'.format(point))
         k.addPoint(point, testfun(point)[0])
     k.train()
     k.snapshot()
-    print 'Current rsquared is: {}'.format(k.history['rsquared'][-1])
+    print('Current rsquared is: {}'.format(k.history['rsquared'][-1]))

-print 'The prediction has converged, with {} number of points in the model'.format(k.n)
+print('The prediction has converged, with {} number of points in the model'.format(k.n))

 #
 #And plot the model
-print 'Now plotting final results...'
+print('Now plotting final results...')
 k.plot()
diff --git a/examples/2D_simple_train.py b/examples/2D_simple_train.py
index e28458a..bf1a3c1 100644
--- a/examples/2D_simple_train.py
+++ b/examples/2D_simple_train.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 __author__ = 'cpaulson'
 import pyKriging
 from pyKriging.krige import kriging
@@ -13,7 +14,7 @@
 # We generate our observed values based on our sampling plan and the test function
 y = testfun(X)

-print 'Setting up the Kriging Model'
+print('Setting up the Kriging Model')
 # Now that we have our initial data, we can create an instance of a kriging model
 k = kriging(X, y, testfunction=testfun, name='simple', testPoints=250)

@@ -23,14 +24,14 @@

 for i in range(5):
     newpoints = k.infill(2)
     for point in newpoints:
-        print 'Adding point {}'.format(point)
+        print(('Adding point {}'.format(point)))
         k.addPoint(point, testfun(point)[0])
     k.train()
     k.snapshot()

 #
 #And plot the model
-print 'Now plotting final results...'
+print('Now plotting final results...')
 k.plot()
diff --git a/examples/2D_simple_train_expected_improvement.py b/examples/2D_simple_train_expected_improvement.py
index 3be1ad0..9e4b975 100644
--- a/examples/2D_simple_train_expected_improvement.py
+++ b/examples/2D_simple_train_expected_improvement.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 __author__ = 'cpaulson'
 import pyKriging
 from pyKriging.krige import kriging
@@ -15,7 +16,7 @@
 optimizer = 'ga'

 # Now that we have our initial data, we can create an instance of a kriging model
-print 'Setting up the Kriging Model'
+print('Setting up the Kriging Model')
 k = kriging(X, y, testfunction=testfun, name='simple_ei', testPoints=300)
 k.train(optimizer=optimizer)
 k.snapshot()
@@ -25,7 +26,7 @@
 for i in range(5):
     newpoints = k.infill(1, method='error')
     for point in newpoints:
-        print 'Adding point {}'.format(point)
+        print('Adding point {}'.format(point))
         k.addPoint(point, testfun(point)[0])
     k.train(optimizer=optimizer)
     k.snapshot()
@@ -34,13 +35,13 @@

 for i in range(5):
     newpoints = k.infill(1, method='ei')
     for point in newpoints:
-        print 'Adding point {}'.format(point)
+        print('Adding point {}'.format(point))
         k.addPoint(point, testfun(point)[0])
     k.train(optimizer=optimizer)
     k.snapshot()

 # And plot the results
-print 'Now plotting final results...'
+print('Now plotting final results...')
 k.plot()
diff --git a/examples/2d_regression_Kriging.py b/examples/2d_regression_Kriging.py
index a810465..b4065c2 100644
--- a/examples/2d_regression_Kriging.py
+++ b/examples/2d_regression_Kriging.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 __author__ = 'cpaulson'
 import sys
 sys.path.insert(0, '../')
@@ -16,26 +17,26 @@
 # We generate our observed values based on our sampling plan and the test function
 y = testfun(X)

-print X, y
+print(X, y)

 testfun = pyKriging.testfunctions().branin

-print 'Setting up the Kriging Model'
+print('Setting up the Kriging Model')
 # Now that we have our initial data, we can create an instance of a kriging model
 k = regression_kriging(X, y, testfunction=testfun, name='simple', testPoints=250)
 k.train(optimizer='pso')
 k1 = kriging(X, y, testfunction=testfun, name='simple', testPoints=250)
 k1.train(optimizer='pso')

-print k.Lambda
+print(k.Lambda)
 k.snapshot()

 for i in range(1):
     newpoints = k.infill(5)
     for point in newpoints:
-        print 'Adding point {}'.format(point)
+        print('Adding point {}'.format(point))
         newValue = testfun(point)[0]
         k.addPoint(point, newValue)
         k1.addPoint(point, newValue)
@@ -45,8 +46,8 @@
 #
 #
 #
 #And plot the model
-print 'Now plotting final results...'
-print k.Lambda
+print('Now plotting final results...')
+print(k.Lambda)
 k.plot(show=False)
 k1.plot()
diff --git a/examples/3d_Simple_Train.py b/examples/3d_Simple_Train.py
index d96325e..72b7471 100644
--- a/examples/3d_Simple_Train.py
+++ b/examples/3d_Simple_Train.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 import pyKriging
 from pyKriging.krige import kriging
 from pyKriging.samplingplan import samplingplan
@@ -23,13 +24,13 @@
 # The infill method can be used for this
 # In this example, we will add nine points in three batches. The model gets trained after each stage
 for i in range(10):
-    print k.history['rsquared'][-1]
-    print 'Infill iteration {0}'.format(i + 1)
+    print(k.history['rsquared'][-1])
+    print('Infill iteration {0}'.format(i + 1))
     infillPoints = k.infill(10)

     # Evaluate the infill points and add them back to the Kriging model
     for point in infillPoints:
-        print 'Adding point {}'.format(point)
+        print('Adding point {}'.format(point))
         k.addPoint(point, testfun(point)[0])

     # Retrain the model with the new points added in to the model
diff --git a/examples/coKriging.py b/examples/coKriging.py
index d1dddb7..5257d46 100644
--- a/examples/coKriging.py
+++ b/examples/coKriging.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 __author__ = 'cpaulson'

 import sys
@@ -14,8 +15,8 @@ def cheap(X):
     C=-5
     D=0
-    print X
-    print ((X+D)*6-2)
+    print(X)
+    print(((X+D)*6-2))
     return A*np.power( ((X+D)*6-2), 2 )*np.sin(((X+D)*6-2)*2)+((X+D)-0.5)*B+C

 def expensive(X):
@@ -30,7 +31,7 @@ def expensive(X):
 ck = coKriging.coKriging(Xc, yc, Xe, ye)
 ck.thetac = np.array([1.2073])

-print ck.Xc
+print(ck.Xc)
 ck.updateData()
 ck.updatePsi()
 ck.neglnlikehood()
diff --git a/pyKriging/GlobalSensitivity.py b/pyKriging/GlobalSensitivity.py
index 10b1b14..226a805 100644
--- a/pyKriging/GlobalSensitivity.py
+++ b/pyKriging/GlobalSensitivity.py
@@ -83,8 +83,8 @@ def sensitivity_Sobol(self, Model, plot=0):
     data = np.genfromtxt(dataFile, delimiter=' ', invalid_raise=False)
     X = data[:,[3,4]]
     VAS = data[:, [5]][:,0]
-    print VAS
+    print(VAS)
     AASM = data[:, [6]][:,0]
-    print AASM
+    print(AASM)
     VAD = data[:, [7]][:,0]
-    print VAD
\ No newline at end of file
+    print(VAD)
\ No newline at end of file
diff --git a/pyKriging/coKriging.py b/pyKriging/coKriging.py
index 1e8bd61..7a72ed2 100644
--- a/pyKriging/coKriging.py
+++ b/pyKriging/coKriging.py
@@ -40,7 +40,7 @@ def __init__(self, Xc, yc, Xe, ye):
         self.one=ones([self.ne+self.nc,1])
         self.y=[self.yc, self.ye]

-        print 'here1'
+        print('here1')

     def reorder_data(self):
         xe = []
@@ -53,7 +53,7 @@ def reorder_data(self):

         for enu,entry in enumerate(self.Xc):
             if entry in self.Xe:
-                print 'Found this value in XE!!'
+                print('Found this value in XE!!')
                 for enu1,test in enumerate(self.Xe):
                     # if entry[0] == test[0] and entry[1] == test[1]:
                     if entry == test:
@@ -87,25 +87,25 @@ def updateData(self):

     def traincheap(self):
         self.kc = kriging(self.Xc, self.yc)
         self.kc.train()
-        print
+        print()

     def distanceXc(self):
         self.distanceXc = np.zeros((self.nc,self.nc, self.k))
         for i in range( self.nc ):
-            for j in xrange(i+1,self.nc):
+            for j in range(i+1,self.nc):
                 self.distanceXc[i][j] = np.abs((self.Xc[i]-self.Xc[j]))

     def distanceXe(self):
         self.distanceXe = np.zeros((self.ne,self.ne, self.k))
         for i in range( self.ne ):
-            for j in xrange(i+1,self.ne):
+            for j in range(i+1,self.ne):
                 self.distanceXe[i][j] = np.abs((self.Xe[i]-self.Xe[j]))

     def distanceXcXe(self):
         self.distanceXcXe = np.zeros((self.nc,self.ne, self.k))
         for i in range( self.nc ):
-            for j in xrange(self.ne):
+            for j in range(self.ne):
                 self.distanceXcXe[i][j] = np.abs((self.Xc[i]-self.Xe[j]))

@@ -118,13 +118,13 @@ def updatePsi(self):
         # print self.pc
         # print self.distanceXc
         newPsicXc = np.exp(-np.sum(self.thetac*np.power(self.distanceXc,self.pc), axis=2))
-        print newPsicXc[0]
+        print(newPsicXc[0])
         self.PsicXc = np.triu(newPsicXc,1)
         self.PsicXc = self.PsicXc + self.PsicXc.T + np.mat(eye(self.nc))+np.multiply(np.mat(eye(self.nc)),np.spacing(1))
         self.UPsicXc = np.linalg.cholesky(self.PsicXc)
         self.UPsicXc = self.UPsicXc.T
-        print self.PsicXc[0]
-        print self.UPsicXc
+        print(self.PsicXc[0])
+        print(self.UPsicXc)
         exit()

         newPsicXe = np.exp(-np.sum(self.thetac*np.power(self.distanceXe,self.pc), axis=2))
@@ -149,22 +149,22 @@ def neglnlikehood(self):
         self.muc = c/f

         # This only works if yc is transposed, then its a scalar under two layers of arrays. Correct? Not sure
-        print 'y',self.yd.T
+        print('y',self.yd.T)
         a = np.linalg.solve(self.UPsicXe.T, self.yd)
-        print 'a',a
+        print('a',a)
         b = np.linalg.solve(self.UPsicXe, a)
-        print 'b', b
+        print('b', b)
         c = ones([self.ne,1]) * b
-        print 'c', c
+        print('c', c)

         d = np.linalg.solve(self.UPsicXe.T, ones([self.ne,1], dtype=float))
-        print d
+        print(d)
         e = np.linalg.solve(self.UPsicXe, d)
-        print e
+        print(e)
         f = ones([self.ne,1]).T * e
-        print f
+        print(f)

         self.mud= c/f
@@ -175,9 +175,9 @@ def neglnlikehood(self):



-        print self.ne
-        print self.mud
-        print self.UPsicXe.T
+        print(self.ne)
+        print(self.mud)
+        print(self.UPsicXe.T)
         a = np.linalg.solve(self.UPsicXe.T,(self.yd-ones([self.ne,1])*self.mud))/self.ne
         b = np.linalg.solve(self.UPsicXe, a)
         self.SigmaSqrd=(self.yd-ones([self.ne,1])*self.mud).T* b
@@ -197,7 +197,7 @@ def fe(X):
     return np.power(X[:,0], 2) + np.power(X[:,1], 2)

 if __name__=='__main__':
-    import samplingplan
+    from . import samplingplan
     import random
     sp = samplingplan.samplingplan(2)
     X = sp.optimallhc(20)
diff --git a/pyKriging/krige.py b/pyKriging/krige.py
index 77ebf73..d401ccc 100644
--- a/pyKriging/krige.py
+++ b/pyKriging/krige.py
@@ -1,10 +1,10 @@
-from __future__ import division
+
 __author__ = 'chrispaulson'
 import numpy as np
 import scipy
 from scipy.optimize import minimize
-from matrixops import matrixops
+from .matrixops import matrixops
 import copy
 from matplotlib import pyplot as plt
 import pylab
@@ -169,7 +169,7 @@ def updateModel(self):
         '''
         try:
             self.updatePsi()
-        except Exception, err:
+        except Exception as err:
             #pass
             # print Exception, err
             raise Exception("bad params")
@@ -441,7 +441,7 @@ def fittingObjective(self,candidates, args):
                 self.updateModel()
                 self.neglikelihood()
                 f = self.NegLnLike
-            except Exception, e:
+            except Exception as e:
                 # print 'Failure in NegLNLike, failing the run'
                 # print Exception, e
                 f = 10000
@@ -462,7 +462,7 @@ def fittingObjective_local(self,entry):
             self.updateModel()
             self.neglikelihood()
             f = self.NegLnLike
-        except Exception, e:
+        except Exception as e:
             # print 'Failure in NegLNLike, failing the run'
             # print Exception, e
             f = 10000
@@ -513,7 +513,7 @@ def plot(self, labels=False, show=True):

         if self.k==2:
             fig = pylab.figure(figsize=(8,6))
-            samplePoints = zip(*self.X)
+            samplePoints = list(zip(*self.X))
             # Create a set of data to plot
             plotgrid = 61
             x = np.linspace(self.normRange[0][0], self.normRange[0][1], num=plotgrid)
@@ -545,7 +545,7 @@ def plot(self, labels=False, show=True):
             ax = fig.add_subplot(221)
             if self.testfunction:
                 # Setup the truth function
-                zt = self.testfunction( np.array(zip(np.ravel(X), np.ravel(Y))) )
+                zt = self.testfunction( np.array(list(zip(np.ravel(X), np.ravel(Y)))) )
                 ZT = zt.reshape(X.shape)
                 CS = pylab.contour(X,Y,ZT,contour_levels ,colors='k',zorder=2)

@@ -616,7 +616,7 @@ def saveFigure(self, name=None):
             mlab.savefig('%s_actual.wrl' % name, figure=truthFig)
             mlab.close(all=True)
         if self.k == 2:
-            samplePoints = zip(*self.X)
+            samplePoints = list(zip(*self.X))
             # Create a set of data to plot
             plotgrid = 61
             x = np.linspace(0, 1, num=plotgrid)
@@ -636,8 +636,8 @@ def saveFigure(self, name=None):
             if self.testfunction:
                 # Setup the truth function
                 zt = self.testfunction(np.array(
-                    zip(np.ravel((X * (self.normRange[0][1] - self.normRange[0][0])) + self.normRange[0][0]),
-                        np.ravel((Y * (self.normRange[1][1] - self.normRange[1][0])) + self.normRange[1][0]))))
+                    list(zip(np.ravel((X * (self.normRange[0][1] - self.normRange[0][0])) + self.normRange[0][0]),
+                        np.ravel((Y * (self.normRange[1][1] - self.normRange[1][0])) + self.normRange[1][0])))))
                 ZT = zt.reshape(X.shape)

                 # Plot real world values
diff --git a/pyKriging/matrixops.py b/pyKriging/matrixops.py
index 06f7ace..2cdb5c4 100644
--- a/pyKriging/matrixops.py
+++ b/pyKriging/matrixops.py
@@ -1,4 +1,4 @@
-from __future__ import division, absolute_import
+
 import numpy as np
 from numpy.matlib import rand,zeros,ones,empty,eye
 import scipy
@@ -17,8 +17,8 @@ def __init__(self):

     def updateData(self):
         self.distance = np.zeros((self.n,self.n, self.k))
-        for i in xrange(self.n):
-            for j in xrange(i+1,self.n):
+        for i in range(self.n):
+            for j in range(i+1,self.n):
                 self.distance[i,j]= np.abs((self.X[i]-self.X[j]))

     def updatePsi(self):
@@ -80,15 +80,15 @@ def predicterr_normalized(self,x):
         for i in range(self.n):
             try:
                 self.psi[i]=np.exp(-np.sum(self.theta*np.power((np.abs(self.X[i]-x)),self.pl)))
-            except Exception,e:
-                print Exception,e
+            except Exception as e:
+                print(Exception,e)
         try:
             SSqr=self.SigmaSqr*(1-self.psi.T.dot(np.linalg.solve(self.U, np.linalg.solve(self.U.T,self.psi))))
-        except Exception, e:
-            print self.U.shape
-            print self.SigmaSqr.shape
-            print self.psi.shape
-            print Exception,e
+        except Exception as e:
+            print(self.U.shape)
+            print(self.SigmaSqr.shape)
+            print(self.psi.shape)
+            print(Exception,e)
             pass

         SSqr = np.abs(SSqr[0])
@@ -98,12 +98,12 @@ def regression_predicterr_normalized(self,x):
         for i in range(self.n):
             try:
                 self.psi[i]=np.exp(-np.sum(self.theta*np.power((np.abs(self.X[i]-x)),self.pl)))
-            except Exception,e:
-                print Exception,e
+            except Exception as e:
+                print(Exception,e)
         try:
             SSqr=self.SigmaSqr*(1+self.Lambda-self.psi.T.dot(np.linalg.solve(self.U, np.linalg.solve(self.U.T,self.psi))))
-        except Exception, e:
-            print Exception,e
+        except Exception as e:
+            print(Exception,e)
             pass

         SSqr = np.abs(SSqr[0])
diff --git a/pyKriging/regressionkrige.py b/pyKriging/regressionkrige.py
index 22ffcb7..c8a76b3 100644
--- a/pyKriging/regressionkrige.py
+++ b/pyKriging/regressionkrige.py
@@ -1,10 +1,10 @@
-from __future__ import division
+
 __author__ = 'chrispaulson'
 import numpy as np
 import scipy
 from scipy.optimize import minimize
-from matrixops import matrixops
+from .matrixops import matrixops
 import copy
 from matplotlib import pyplot as plt
 import pylab
@@ -143,7 +143,7 @@ def addPoint(self, newX, newy, norm=True):
             try:
                 self.updateModel()
             except:
-                print 'Couldnt update the model with these hyperparameters, retraining'
+                print('Couldnt update the model with these hyperparameters, retraining')
                 self.train()
             else:
                 break
@@ -166,7 +166,7 @@ def updateModel(self):
         '''
         try:
             self.regupdatePsi()
-        except Exception, err:
+        except Exception as err:
             pass
             # print Exception, err
             # raise Exception("bad params")
@@ -444,7 +444,7 @@ def fittingObjective(self,candidates, args):
                 self.updateModel()
                 self.regneglikelihood()
                 f = self.NegLnLike
-            except Exception, e:
+            except Exception as e:
                 # print 'Failure in NegLNLike, failing the run'
                 # print Exception, e
                 f = 10000
@@ -466,7 +466,7 @@ def fittingObjective_local(self,entry):
             self.updateModel()
             self.regneglikelihood()
             f = self.NegLnLike
-        except Exception, e:
+        except Exception as e:
             # print 'Failure in NegLNLike, failing the run'
             # print Exception, e
             f = 10000
@@ -517,7 +517,7 @@ def plot(self, labels=False, show=True):

         if self.k==2:
             fig = pylab.figure(figsize=(8,6))
-            samplePoints = zip(*self.X)
+            samplePoints = list(zip(*self.X))
             # Create a set of data to plot
             plotgrid = 61
             x = np.linspace(self.normRange[0][0], self.normRange[0][1], num=plotgrid)
@@ -549,7 +549,7 @@ def plot(self, labels=False, show=True):
             ax = fig.add_subplot(221)
             if self.testfunction:
                 # Setup the truth function
-                zt = self.testfunction( np.array(zip(np.ravel(X), np.ravel(Y))) )
+                zt = self.testfunction( np.array(list(zip(np.ravel(X), np.ravel(Y)))) )
                 ZT = zt.reshape(X.shape)
                 CS = pylab.contour(X,Y,ZT,contour_levels ,colors='k',zorder=2)

@@ -620,7 +620,7 @@ def saveFigure(self, name=None):
             mlab.savefig('%s_actual.wrl' % name, figure=truthFig)
             mlab.close(all=True)
         if self.k == 2:
-            samplePoints = zip(*self.X)
+            samplePoints = list(zip(*self.X))
             # Create a set of data to plot
             plotgrid = 61
             x = np.linspace(0, 1, num=plotgrid)
@@ -640,8 +640,8 @@ def saveFigure(self, name=None):
             if self.testfunction:
                 # Setup the truth function
                 zt = self.testfunction(np.array(
-                    zip(np.ravel((X * (self.normRange[0][1] - self.normRange[0][0])) + self.normRange[0][0]),
-                        np.ravel((Y * (self.normRange[1][1] - self.normRange[1][0])) + self.normRange[1][0]))))
+                    list(zip(np.ravel((X * (self.normRange[0][1] - self.normRange[0][0])) + self.normRange[0][0]),
+                        np.ravel((Y * (self.normRange[1][1] - self.normRange[1][0])) + self.normRange[1][0])))))
                 ZT = zt.reshape(X.shape)

                 # Plot real world values
diff --git a/pyKriging/samplingplan.py b/pyKriging/samplingplan.py
index 8e82ee0..3f2f3f8 100644
--- a/pyKriging/samplingplan.py
+++ b/pyKriging/samplingplan.py
@@ -32,7 +32,7 @@ def rlh(self,n,Edges=0):

         #exclude 0
-        for i in xrange(0,self.k):
+        for i in range(0,self.k):
            X[:,i] = np.transpose(np.random.permutation(np.arange(1,n+1,1)))

        if Edges == 1:
@@ -63,14 +63,17 @@ def optimallhc(self,n,population=30, iterations=30, generation=False):
         """

-        if not generation:
+        ## TODO: This code isnt working in the Python3 branch.
+
+        # if not generation:
+
             # Check for existing LHC sampling plans
-            if os.path.isfile('{0}lhc_{1}_{2}.pkl'.format(self.path,self.k, n)):
-                X = pickle.load(open('{0}lhc_{1}_{2}.pkl'.format(self.path,self.k, n), 'r'))
-                return X
-            else:
-                print self.path
-                print 'SP not found on disk, generating it now.'
+            # if os.path.isfile('{0}lhc_{1}_{2}.pkl'.format(self.path,self.k, n)):
+            #     X = pickle.load(open('{0}lhc_{1}_{2}.pkl'.format(self.path,self.k, n), 'rb'))
+            #     return X
+            # else:
+            #     print(self.path)
+            #     print('SP not found on disk, generating it now.')

         #list of qs to optimise Phi_q for
         q = [1,2,5,10,20,50,100]
@@ -84,18 +87,18 @@ def optimallhc(self,n,population=30, iterations=30, generation=False):

         X3D = np.zeros((n,self.k,len(q)))
         #for each q optimize Phi_q
-        for i in xrange(len(q)):
-            print ('Now_optimizing_for_q = %d \n' %q[i])
+        for i in range(len(q)):
+            print(('Now_optimizing_for_q = %d \n' %q[i]))

             X3D[:,:,i] = self.mmlhs(XStart, population, iterations, q[i])

         #sort according to the Morris-Mitchell criterion
         Index = self.mmsort(X3D,p)
-        print ('Best_lh_found_using_q = %d \n' %q[Index[1]])
+        print(('Best_lh_found_using_q = %d \n' %q[Index[1]]))

         #and the Latin hypercube with the best space-filling properties is
         X = X3D[:,:,Index[1]]
-        pickle.dump(X, open('{0}lhc_{1}_{2}.pkl'.format(self.path,self.k, n), 'wb'))
+        # pickle.dump(X, open('{0}lhc_{1}_{2}.pkl'.format(self.path,self.k, n), 'wb'))

         return X
@@ -248,7 +251,7 @@ def jd(self, X,p=1):
         n = np.size(X[:,1])

         #computes the distances between all pairs of points
-        d = np.zeros((n*(n-1)/2))
+        d = np.zeros((n*(n-1)//2))


@@ -260,28 +263,16 @@ def jd(self, X,p=1):
         #        d[((i-1)*n - (i-1)*i/2 + j - i )] = np.linalg.norm((X[i,:] - X[j,:]),2)

         #an alternative way of the above loop
-        list = [(i,j) for i in xrange(n-1) for j in xrange(i+1,n)]
+        list = [(i,j) for i in range(n-1) for j in range(i+1,n)]
         for k,l in enumerate(list):
             d[k] = np.linalg.norm((X[l[0],:]-X[l[1],:]),p)

         #remove multiple occurences
-        distinct_d = np.unique(d)
-
-        #pre-allocate memory for J
-        J = np.zeros(np.size(distinct_d))
-
-        #generate multiplicity array
-        for i in xrange(len(distinct_d)):
-            #J(i) will contain the number of pairs separated
-            #by the distance distinct_d(i)
-            J[i]=np.sum(self.ismember(d,distinct_d[i]))
+        distinct_d, J = np.unique(d, return_counts=True)

         return J, distinct_d

-    def ismember(self, A, B):
-        return [ np.sum(a == B) for a in A ]
-
     def mm(self,X1,X2,p=1):
         """
         Given two sampling plans chooses the one with the better space-filling properties
@@ -325,7 +316,7 @@ def mm(self,X1,X2,p=1):
         #generate vector c such that c(i)=1 if V1(i)>V2(i), c(i)=2 if V1(i)