Hot-keys on this page
r m x p toggle line displays
j k next/prev highlighted chunk
0 (zero) top of page
1 (one) first highlighted chunk
""" A class for representing, simulating, and computing Markov Chains. """
""" Returns the Pandas DataFrame representation of the MarkovChain. """ return pd.DataFrame( data=self.matrix, index=self.states, columns=self.states )
""" Returns the transition matrix after n steps as a numpy matrix.
Parameters ---------- steps : int (optional) Number of steps. (default: 1)
Returns ------- Transition matrix """
""" Returns the transition matrix after n steps visually as a Pandas df.
Parameters ---------- steps : int (optional) Number of steps. (default: 1)
Returns ------- Pandas DataFrame """ return pd.DataFrame( data=self.get_transition_matrix(steps), index=self.states, columns=self.states )
""" Finds the distribution of states after n steps given a starting condition.
Parameters ---------- starting_condition : state or Table The initial distribution or the original state. n : integer Number of transition steps.
Returns ------- Table Shows the distribution after n steps
Examples -------- >>> states = make_array('A', 'B') >>> transition_matrix = np.array([[0.1, 0.9], ... [0.8, 0.2]]) >>> mc = MarkovChain.from_matrix(states, transition_matrix) >>> start = Table().states(['A', 'B']).probability([0.8, 0.2]) >>> mc.distribution(start) State | Probability A | 0.24 B | 0.76 >>> mc.distribution(start, 0) State | Probability A | 0.8 B | 0.2 >>> mc.distribution(start, 3) State | Probability A | 0.3576 B | 0.6424 """ else:
else:
""" Finds the log-probability of a path given a starting condition.
May have better precision than `prob_of_path`.
Parameters ---------- starting_condition : state or Distribution If a state, finds the log-probability of the path starting at that state. If a Distribution, finds the probability of the path with the first element sampled from the Distribution path : ndarray Array of states
Returns ------- float log of probability
Examples -------- >>> states = make_array('A', 'B') >>> transition_matrix = np.array([[0.1, 0.9], ... [0.8, 0.2]]) >>> mc = MarkovChain.from_matrix(states, transition_matrix) >>> mc.log_prob_of_path('A', ['A', 'B', 'A']) -2.6310891599660815 >>> start = Table().states(['A', 'B']).probability([0.8, 0.2]) >>> mc.log_prob_of_path(start, ['A', 'B', 'A']) -0.55164761828624576 """ else:
""" Finds the probability of a path given a starting condition.
Parameters ---------- starting_condition : state or Distribution If a state, finds the probability of the path starting at that state. If a Distribution, finds the probability of the path with the first element sampled from the Distribution. path : ndarray Array of states
Returns ------- float probability
Examples -------- >>> states = ['A', 'B'] >>> transition_matrix = np.array([[0.1, 0.9], ... [0.8, 0.2]]) >>> mc = MarkovChain.from_matrix(states, transition_matrix) >>> mc.prob_of_path('A', ['A', 'B', 'A']) 0.072 >>> 0.1 * 0.9 * 0.8 0.072 >>> start = Table().states(['A', 'B']).probability([0.8, 0.2]) >>> mc.prob_of_path(start, ['A', 'B', 'A']) 0.576 >>> 0.8 * 0.9 * 0.8 0.576 """ else:
""" Simulates a path of n steps with a specific starting condition.
Parameters ---------- starting_condition : state or Distribution If a state, simulates n steps starting at that state. If a Distribution, samples from that distribution to find the starting state. steps : int Number of steps to take. plot_path : bool If True, plots the simulated path.
Returns ------- ndarray Array of sampled states.
Examples -------- >>> states = ['A', 'B'] >>> transition_matrix = np.array([[0.1, 0.9], ... [0.8, 0.2]]) >>> mc = MarkovChain.from_matrix(states, transition_matrix) >>> mc.simulate_path('A', 10) array(['A', 'A', 'B', 'A', 'B', 'A', 'B', 'B', 'A', 'B', 'B']) """ else:
""" Finds the stationary distribution of the Markov Chain.
Returns ------- Table Distribution.
Examples -------- >>> states = ['A', 'B'] >>> transition_matrix = np.array([[0.1, 0.9], ... [0.8, 0.2]]) >>> mc = MarkovChain.from_matrix(states, transition_matrix) >>> mc.steady_state() Value | Probability A | 0.470588 B | 0.529412 """ # Steady state is the left eigenvector that corresponds to eigenvalue=1.
# Find index of eigenvalue = 1.
""" Finds the expected return time of the Markov Chain (1 / steady state).
Returns ------- Table Expected Return Time
Examples -------- >>> states = ['A', 'B'] >>> transition_matrix = np.array([[0.1, 0.9], ... [0.8, 0.2]]) >>> mc = MarkovChain.from_matrix(states, transition_matrix) >>> mc.expected_return_time() Value | Expected Return Time A | 2.125 B | 1.88889 """ 'Expected Return Time', 1 / expected_return )
""" Plots a Markov Chain's path.
Parameters ---------- starting_condition : state State to start at. path : iterable List of valid states.
Examples -------- >>> states = ['A', 'B'] # Works with all state data types! >>> transition_matrix = np.array([[0.1, 0.9], ... [0.8, 0.2]]) >>> mc = MarkovChain.from_matrix(states, transition_matrix) >>> mc.plot_path('B', mc.simulate_path('B', 20)) <Plot of a Markov Chain that starts at 'B' and takes 20 steps> """ raise Exception('Path not possible.')
return self.to_pandas()._repr_html_()
return self.to_pandas().__repr__()
return self.to_pandas().__str__()
def from_table(cls, table): """ Constructs a Markov Chain from a Table
Parameters ---------- table : Table A table with three columns for source state, target state, and probability.
Returns ------- MarkovChain
Examples -------- >>> table = Table().states(make_array('A', 'B')) \ ... .transition_probability(make_array(0.5, 0.5, 0.3, 0.7)) >>> table Source | Target | Probability A | A | 0.5 A | B | 0.5 B | A | 0.3 B | B | 0.7 >>> MarkovChain.from_table(table) A B A 0.5 0.5 B 0.3 0.7 """ 'Must have 3 columns: source, target, probability' 'Transition probabilities must sum to 1.'
# Get a list of the states.
def from_transition_function(cls, states, transition_function): """ Constructs a MarkovChain from a transition function.
Parameters ---------- states : iterable List of states. transition_function : function Bivariate transition function that maps two states to a probability.
Returns ------- MarkovChain
Examples -------- >>> states = make_array(1, 2) >>> def transition(s1, s2): ... if s1 == s2: ... return 0.7 ... else: ... return 0.3 >>> MarkovChain.from_transition_function(states, transition) 1 2 1 0.7 0.3 2 0.3 0.7 """ states[j])
def from_matrix(cls, states, transition_matrix): """ Constructs a MarkovChain from a transition matrix.
Parameters ---------- states : iterable List of states. transition_matrix : ndarray Square transition matrix.
Returns ------- MarkovChain
Examples -------- >>> states = [1, 2] >>> transition_matrix = np.array([[0.1, 0.9], ... [0.8, 0.2]]) >>> MarkovChain.from_matrix(states, transition_matrix) 1 2 1 0.1 0.9 2 0.8 0.2 """
""" Constructs a Markov Chain from the Table.
Returns ------- MarkovChain """ |