diff --git a/pyhsmm/basic/distributions.py b/pyhsmm/basic/distributions.py
index faed175f620973a81e90b243afb1dd26e925948e..940862e1465205b8b964cf8c8f7cf112c9cdcc18 100644
--- a/pyhsmm/basic/distributions.py
+++ b/pyhsmm/basic/distributions.py
@@ -4,7 +4,7 @@
 from scipy.misc import logsumexp
 from pybasicbayes.distributions import *
 from pybasicbayes.models import MixtureDistribution
-from abstractions import DurationDistribution
+from .abstractions import DurationDistribution
 
 ##############################################
 #  Mixins for making duratino distributions  #
diff --git a/pyhsmm/models.py b/pyhsmm/models.py
index 1de2dd1fb37718ae2944c7267c2535a44d947d9b..ac6926a13dbddd0b7aa8f2c50272634225e08f01 100644
--- a/pyhsmm/models.py
+++ b/pyhsmm/models.py
@@ -1,8 +1,6 @@
 from __future__ import division
 from future.utils import iteritems, itervalues
-from builtins import map
-
-import sys
+from builtins import map, zip
 
 import numpy as np
 import itertools
@@ -189,7 +187,7 @@ class _HMMBase(Model):
     def used_states(self):
         'a list of the used states in the order they appear'
         c = itertools.count()
-        canonical_ids = collections.defaultdict(c.__next__ if sys.version_info.major == 3 else c.next)
+        canonical_ids = collections.defaultdict(lambda: next(c))
         for s in self.states_list:
             for state in s.stateseq:
                 canonical_ids[state]
diff --git a/pyhsmm/util/general.py b/pyhsmm/util/general.py
index 4b354a50f5c320686371c4a85da310cf5d7d8dde..c9a96f96d6949c406f46c9dc662c9759c227a748 100644
--- a/pyhsmm/util/general.py
+++ b/pyhsmm/util/general.py
@@ -1,5 +1,5 @@
 from __future__ import division
-from builtins import range, map, zip, filter
+from builtins import range, zip, filter
 
 import numpy as np
 from numpy.lib.stride_tricks import as_strided as ast
@@ -81,7 +81,8 @@ def nice_indices(arr):
     # surprisingly, this is slower for very small (and very large) inputs:
     # u,f,i = np.unique(arr,return_index=True,return_inverse=True)
     # arr[:] = np.arange(u.shape[0])[np.argsort(f)][i]
-    ids = collections.defaultdict(count().next)
+    c = count()
+    ids = collections.defaultdict(lambda: next(c))
     for idx,x in enumerate(arr):
         arr[idx] = ids[x]
     return arr
@@ -146,7 +147,7 @@ def stateseq_hamming_error(sampledstates,truestates):
 
 def _sieve(stream):
     # just for fun; doesn't work over a few hundred
-    val = stream.next()
+    val = next(stream)
     yield val
     for x in filter(lambda x: x%val != 0, _sieve(stream)):
         yield x
diff --git a/pyhsmm/util/stats.py b/pyhsmm/util/stats.py
index ae7922ae37345d96fc252d545ddf359ae6d3ea9e..7fde82815345c8f00b0c074f236cc6519abdc276 100644
--- a/pyhsmm/util/stats.py
+++ b/pyhsmm/util/stats.py
@@ -8,7 +8,7 @@
 import scipy.special as special
 import scipy.linalg
 from numpy.core.umath_tests import inner1d
-import general
+from . import general
 
 # TODO write cholesky versions
 
diff --git a/pyhsmm/util/testing.py b/pyhsmm/util/testing.py
index 1ee6dd4093489712e61bab907a1062253279e685..bf670710353299610ffd92e7f9aba1efee403949 100644
--- a/pyhsmm/util/testing.py
+++ b/pyhsmm/util/testing.py
@@ -3,7 +3,7 @@
 import numpy as np
 from numpy import newaxis as na
 from matplotlib import pyplot as plt
-import stats, general
+from . import stats, general
 
 #########################
 #  statistical testing  #