diff --git a/CHANGES.md b/CHANGES.md index 62dc736e6..46672efcf 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,37 @@ +# v1.13.0, 2016-10-16 + +New strategy, state distribution and documentation + +- Adding Prober4 strategy + https://github.com/Axelrod-Python/Axelrod/pull/743 +- Adding state distribution to results set + https://github.com/Axelrod-Python/Axelrod/pull/742 +- More references for strategies + https://github.com/Axelrod-Python/Axelrod/pull/745 + +Here are all the commits for this PR: +https://github.com/Axelrod-Python/Axelrod/compare/v1.12.0...v1.13.0 + +# v1.12.0, 2016-10-13 + +Human interactive player, new strategy, under the hood improvements and +documentation. + +- You can play against an instance of `axelrod.Human` + https://github.com/Axelrod-Python/Axelrod/pull/732 +- Improved efficiency of result set from memory + https://github.com/Axelrod-Python/Axelrod/pull/737 +- Documentation improvements + https://github.com/Axelrod-Python/Axelrod/pull/741 + https://github.com/Axelrod-Python/Axelrod/pull/736 + https://github.com/Axelrod-Python/Axelrod/pull/735 + https://github.com/Axelrod-Python/Axelrod/pull/727 +- New strategy CyclerCCCDCD: + https://github.com/Axelrod-Python/Axelrod/pull/379 + +Here are all the commits for this PR: +https://github.com/Axelrod-Python/Axelrod/compare/v1.11.0...v1.12.0 + # v1.12.0, 2016-10-13 Human interactive player, new strategy, under the hood improvements and diff --git a/axelrod/result_set.py b/axelrod/result_set.py index 398e9b1c4..27d9d0e0f 100644 --- a/axelrod/result_set.py +++ b/axelrod/result_set.py @@ -1,12 +1,15 @@ import csv import tqdm -from collections import namedtuple +from collections import namedtuple, Counter from numpy import mean, nanmedian, std from . 
import eigen from .game import Game import axelrod.interaction_utils as iu +from axelrod import Actions + +C, D = Actions.C, Actions.D def update_progress_bar(method): @@ -141,7 +144,8 @@ def _build_players(self): del self.players_d # Manual garbage collection return players - def build_eigenmoses_rating(self): + @update_progress_bar + def _build_eigenmoses_rating(self): """ Returns: -------- @@ -154,7 +158,8 @@ def build_eigenmoses_rating(self): return eigenvector.tolist() - def build_eigenjesus_rating(self): + @update_progress_bar + def _build_eigenjesus_rating(self): """ Returns: -------- @@ -167,7 +172,8 @@ def build_eigenjesus_rating(self): return eigenvector.tolist() - def build_cooperating_rating(self): + @update_progress_bar + def _build_cooperating_rating(self): """ Returns: -------- @@ -197,7 +203,8 @@ def build_cooperating_rating(self): return [sum(cs) / max(1, float(sum(ls))) for cs, ls in zip(self.cooperation, lengths)] - def build_vengeful_cooperation(self): + @update_progress_bar + def _build_vengeful_cooperation(self): """ Returns: -------- @@ -210,7 +217,8 @@ def build_vengeful_cooperation(self): return [[2 * (element - 0.5) for element in row] for row in self.normalised_cooperation] - def build_payoff_diffs_means(self): + @update_progress_bar + def _build_payoff_diffs_means(self): """ Returns: -------- @@ -231,7 +239,8 @@ def build_payoff_diffs_means(self): for player in self.score_diffs] return payoff_diffs_means - def build_payoff_stddevs(self): + @update_progress_bar + def _build_payoff_stddevs(self): """ Returns: -------- @@ -267,7 +276,8 @@ def build_payoff_stddevs(self): return payoff_stddevs - def build_payoff_matrix(self): + @update_progress_bar + def _build_payoff_matrix(self): """ Returns: -------- @@ -301,7 +311,8 @@ def build_payoff_matrix(self): return payoff_matrix - def build_ranked_names(self): + @update_progress_bar + def _build_ranked_names(self): """ Returns: -------- @@ -311,7 +322,8 @@ def build_ranked_names(self): return 
[str(self.players[i]) for i in self.ranking] - def build_ranking(self): + @update_progress_bar + def _build_ranking(self): """ Returns: -------- @@ -326,6 +338,27 @@ def build_ranking(self): return sorted(range(self.nplayers), key=lambda i: -nanmedian(self.normalised_scores[i])) + @update_progress_bar + def _build_normalised_state_distribution(self): + """ + Returns + ---------- + + Normalised state distribution. A list of lists of counter objects: + + Dictionary where the keys are the states and the values are a + normalized counts of the number of times that state occurs. + """ + norm = [] + for player in self.state_distribution: + counters = [] + for counter in player: + total = sum(counter.values()) + counters.append(Counter({key: float(value) / total for + key, value in counter.items()})) + norm.append(counters) + return norm + def _build_empty_metrics(self, keep_interactions=False): """ Creates the various empty metrics ready to be updated as the data is @@ -350,6 +383,8 @@ def _build_empty_metrics(self, keep_interactions=False): self.cooperation = [[0 for opponent in plist] for player in plist] self.normalised_cooperation = [[[] for opponent in plist] for player in plist] + self.state_distribution = [[Counter() for opponent in plist] + for player in plist] self.good_partner_matrix = [[0 for opponent in plist] for player in plist] @@ -360,25 +395,84 @@ def _build_empty_metrics(self, keep_interactions=False): self.interactions = {} def _update_match_lengths(self, repetition, p1, p2, interaction): + """ + During a read of the data, update the match lengths attribute + + Parameters + ---------- + + repetition : int + The index of the repetition + p1, p2 : int + The indices of the first and second player + interaction : list + A list of tuples of interactions + """ self.match_lengths[repetition][p1][p2] = len(interaction) def _update_payoffs(self, p1, p2, scores_per_turn): + """ + During a read of the data, update the payoffs attribute + + Parameters + ---------- 
+ + p1, p2 : int + The indices of the first and second player + scores_per_turn : tuples + A 2-tuple of the scores per turn for a given match + """ self.payoffs[p1][p2].append(scores_per_turn[0]) if p1 != p2: self.payoffs[p2][p1].append(scores_per_turn[1]) def _update_score_diffs(self, repetition, p1, p2, scores_per_turn): + """ + During a read of the data, update the score diffs attribute + + Parameters + ---------- + + p1, p2 : int + The indices of the first and second player + scores_per_turn : tuples + A 2-tuple of the scores per turn for a given match + """ diff = scores_per_turn[0] - scores_per_turn[1] self.score_diffs[p1][p2][repetition] = diff self.score_diffs[p2][p1][repetition] = -diff def _update_normalised_cooperation(self, p1, p2, interaction): + """ + During a read of the data, update the normalised cooperation attribute + + Parameters + ---------- + + p1, p2 : int + The indices of the first and second player + interaction : list of tuples + A list of interactions + """ normalised_cooperations = iu.compute_normalised_cooperation(interaction) self.normalised_cooperation[p1][p2].append(normalised_cooperations[0]) self.normalised_cooperation[p2][p1].append(normalised_cooperations[1]) def _update_wins(self, repetition, p1, p2, interaction): + """ + During a read of the data, update the wins attribute + + Parameters + ---------- + + repetition : int + The index of a repetition + p1, p2 : int + The indices of the first and second player + interaction : list of tuples + A list of interactions + """ match_winner_index = iu.compute_winner_index(interaction, game=self.game) index_pair = [p1, p2] @@ -387,27 +481,95 @@ def _update_wins(self, repetition, p1, p2, interaction): self.wins[winner_index][repetition] += 1 def _update_scores(self, repetition, p1, p2, interaction): + """ + During a read of the data, update the scores attribute + + Parameters + ---------- + + repetition : int + The index of a repetition + p1, p2 : int + The indices of the first and second 
player + interaction : list of tuples + A list of interactions + """ final_scores = iu.compute_final_score(interaction, game=self.game) for index, player in enumerate([p1, p2]): player_score = final_scores[index] self.scores[player][repetition] += player_score def _update_normalised_scores(self, repetition, p1, p2, scores_per_turn): + """ + During a read of the data, update the normalised scores attribute + + Parameters + ---------- + + repetition : int + The index of a repetition + p1, p2 : int + The indices of the first and second player + scores_per_turn : tuple + A 2 tuple with the scores per turn of each player + """ for index, player in enumerate([p1, p2]): score_per_turn = scores_per_turn[index] self.normalised_scores[player][repetition].append(score_per_turn) def _update_cooperation(self, p1, p2, cooperations): + """ + During a read of the data, update the cooperation attribute + + Parameters + ---------- + + p1, p2 : int + The indices of the first and second player + cooperations : tuple + A 2 tuple with the count of cooperation each player + """ self.cooperation[p1][p2] += cooperations[0] self.cooperation[p2][p1] += cooperations[1] + def _update_state_distribution(self, p1, p2, counter): + """ + During a read of the data, update the state_distribution attribute + + Parameters + ---------- + + p1, p2 : int + The indices of the first and second player + counter : collections.Counter + A counter object for the states of a match + """ + self.state_distribution[p1][p2] += counter + + counter[(C, D)], counter[(D, C)] = counter[(D, C)], counter[(C, D)] + self.state_distribution[p2][p1] += counter + def _update_good_partner_matrix(self, p1, p2, cooperations): + """ + During a read of the data, update the good partner matrix attribute + + Parameters + ---------- + + p1, p2 : int + The indices of the first and second player + cooperations : tuple + A 2 tuple with the count of cooperation each player + """ if cooperations[0] >= cooperations[1]: 
self.good_partner_matrix[p1][p2] += 1 if cooperations[1] >= cooperations[0]: self.good_partner_matrix[p2][p1] += 1 def _summarise_normalised_scores(self): + """ + At the end of a read of the data, finalise the normalised scores + """ for i, rep in enumerate(self.normalised_scores): for j, player_scores in enumerate(rep): if player_scores != []: @@ -420,6 +582,9 @@ def _summarise_normalised_scores(self): pass def _summarise_normalised_cooperation(self): + """ + At the end of a read of the data, finalise the normalised cooperation + """ for i, rep in enumerate(self.normalised_cooperation): for j, cooperation in enumerate(rep): if cooperation != []: @@ -432,7 +597,11 @@ def _summarise_normalised_cooperation(self): pass @update_progress_bar - def build_good_partner_rating(self): + def _build_good_partner_rating(self): + """ + At the end of a read of the data, build the good partner rating + attribute + """ return [sum(self.good_partner_matrix[player]) / max(1, float(self.total_interactions[player])) for player in range(self.nplayers)] @@ -466,6 +635,7 @@ def _build_score_related_metrics(self, progress_bar=False, scores_per_turn = iu.compute_final_score_per_turn(interaction, game=self.game) cooperations = iu.compute_cooperations(interaction) + state_counter = iu.compute_state_distribution(interaction) self._update_match_lengths(repetition, p1, p2, interaction) self._update_payoffs(p1, p2, scores_per_turn) @@ -483,29 +653,40 @@ def _build_score_related_metrics(self, progress_bar=False, self._update_normalised_scores(repetition, p1, p2, scores_per_turn) self._update_cooperation(p1, p2, cooperations) + self._update_state_distribution(p1, p2, state_counter) self._update_good_partner_matrix(p1, p2, cooperations) if progress_bar: - self.progress_bar = tqdm.tqdm(total=10 + 2 * self.nplayers, + self.progress_bar = tqdm.tqdm(total=11 + 2 * self.nplayers, desc="Finishing") self._summarise_normalised_scores() self._summarise_normalised_cooperation() - self.ranking = 
self.build_ranking() - self.ranked_names = self.build_ranked_names() - self.payoff_matrix = self.build_payoff_matrix() - self.payoff_stddevs = self.build_payoff_stddevs() - self.payoff_diffs_means = self.build_payoff_diffs_means() - self.vengeful_cooperation = self.build_vengeful_cooperation() - self.cooperating_rating = self.build_cooperating_rating() - self.good_partner_rating = self.build_good_partner_rating() - self.eigenjesus_rating = self.build_eigenjesus_rating() - self.eigenmoses_rating = self.build_eigenmoses_rating() + self.ranking = self._build_ranking() + self.normalised_state_distribution = self._build_normalised_state_distribution() + self.ranked_names = self._build_ranked_names() + self.payoff_matrix = self._build_payoff_matrix() + self.payoff_stddevs = self._build_payoff_stddevs() + self.payoff_diffs_means = self._build_payoff_diffs_means() + self.vengeful_cooperation = self._build_vengeful_cooperation() + self.cooperating_rating = self._build_cooperating_rating() + self.good_partner_rating = self._build_good_partner_rating() + self.eigenjesus_rating = self._build_eigenjesus_rating() + self.eigenmoses_rating = self._build_eigenmoses_rating() if progress_bar: self.progress_bar.close() def __eq__(self, other): + """ + Check equality of results set + + Parameters + ---------- + + other : axelrod.ResultSet + Another results set against which to check equality + """ return all([self.wins == other.wins, self.match_lengths == other.match_lengths, self.scores == other.scores, @@ -527,6 +708,15 @@ def __eq__(self, other): self.eigenjesus_rating == other.eigenjesus_rating]) def __ne__(self, other): + """ + Check inequality of results set + + Parameters + ---------- + + other : axelrod.ResultSet + Another results set against which to check inequality + """ return not self.__eq__(other) def summarise(self): @@ -548,14 +738,30 @@ def summarise(self): median_wins = map(nanmedian, self.wins) self.player = namedtuple("Player", ["Rank", "Name", "Median_score", - 
"Cooperation_rating", "Wins"]) + "Cooperation_rating", "Wins", + "CC_rate", "CD_rate", "DC_rate", + "DD_rate"]) + + states = [(C, C), (C, D), (D, C), (D, D)] + state_prob = [] + for i, player in enumerate(self.normalised_state_distribution): + counts = [] + for state in states: + p = sum([opp[state] for j, opp in enumerate(player) if i != j]) + counts.append(p) + try: + counts = [float(c) / sum(counts) for c in counts] + except ZeroDivisionError: + counts = [0 for c in counts] + state_prob.append(counts) - summary_data = [perf for perf in zip(self.players, - median_scores, - self.cooperating_rating, - median_wins)] - summary_data = [self.player(rank, *summary_data[i]) for - rank, i in enumerate(self.ranking)] + summary_measures = list(zip(self.players, median_scores, + self.cooperating_rating, median_wins)) + + summary_data = [] + for rank, i in enumerate(self.ranking): + data = list(summary_measures[i]) + state_prob[i] + summary_data.append(self.player(rank, *data)) return summary_data @@ -563,7 +769,7 @@ def write_summary(self, filename): """ Write a csv file containing summary data of the results of the form: - "Rank", "Name", "Median-score-per-turn", "Cooperation-rating" + "Rank", "Name", "Median-score-per-turn", "Cooperation-rating", "Wins", "CC-Rate", "CD-Rate", "DC-Rate", "DD-rate" Parameters ---------- @@ -601,6 +807,8 @@ def read_match_chunks(self, progress_bar=False): players_pair = [self.players[i] for i in match_pair] repetitions = [list(match_pair) + players_pair + rep for rep in interactions] + if progress_bar: + progress_bar.update() yield repetitions if progress_bar: diff --git a/axelrod/strategies/_strategies.py b/axelrod/strategies/_strategies.py index d4f92fabe..873dce92a 100644 --- a/axelrod/strategies/_strategies.py +++ b/axelrod/strategies/_strategies.py @@ -47,7 +47,7 @@ from .mindreader import MindReader, ProtectedMindReader, MirrorMindReader from .mutual import Desperate, Hopeless, Willing from .oncebitten import OnceBitten, FoolMeOnce, 
ForgetfulFoolMeOnce, FoolMeForever -from .prober import (Prober, Prober2, Prober3, HardProber, +from .prober import (Prober, Prober2, Prober3, Prober4, HardProber, NaiveProber, RemorsefulProber) from .punisher import Punisher, InversePunisher from .qlearner import RiskyQLearner, ArrogantQLearner, HesitantQLearner, CautiousQLearner @@ -165,6 +165,7 @@ Prober, Prober2, Prober3, + Prober4, ProtectedMindReader, Punisher, Raider, diff --git a/axelrod/strategies/axelrod_first.py b/axelrod/strategies/axelrod_first.py index 97064e2e9..3c909302f 100644 --- a/axelrod/strategies/axelrod_first.py +++ b/axelrod/strategies/axelrod_first.py @@ -12,8 +12,16 @@ class Davis(Player): - """A player starts by cooperating for 10 rounds then plays Grudger, - defecting if at any point the opponent has defected.""" + """ + Submitted to Axelrod's first tournament by Morton Davis. + + A player starts by cooperating for 10 rounds then plays Grudger, + defecting if at any point the opponent has defected. + + Names: + + - Davis: [Axelrod1980]_ + """ name = 'Davis' classifier = { @@ -49,8 +57,11 @@ def strategy(self, opponent): class RevisedDowning(Player): """Revised Downing attempts to determine if players are cooperative or not. - If so, it cooperates with them. This strategy would have won Axelrod's first - tournament. + If so, it cooperates with them. This strategy would have won Axelrod's first tournament. + + Names: + + - Revised Downing: [Axelrod1980]_ """ name = "Revised Downing" @@ -127,8 +138,14 @@ def reset(self): class Feld(Player): """ + Submitted to Axelrod's first tournament by Scott Feld. + Defects when opponent defects. Cooperates with a probability that decreases to 0.5 at round 200. + + Names: + + - Feld: [Axelrod1980]_ """ name = "Feld" @@ -182,9 +199,15 @@ def strategy(self, opponent): class Grofman(Player): """ + Submitted to Axelrod's first tournament by Bernard Grofman. + Cooperate on the first 2 moves. Return opponent's move for the next 5. 
Then cooperate if the last round's moves were the same, otherwise cooperate with probability 2/7. + + Names: + + - Grofman: [Axelrod1980]_ """ name = "Grofman" @@ -212,6 +235,8 @@ def strategy(self, opponent): class Joss(MemoryOnePlayer): """ + Submitted to Axelrod's first tournament by Johann Joss. + Cooperates with probability 0.9 when the opponent cooperates, otherwise emulates Tit-For-Tat. @@ -242,21 +267,18 @@ def __repr__(self): class Nydegger(Player): """ - The program begins with tit for tat for the first three moves, except that - if it was the only one to cooperate on the first move and the only one to - defect on the second move, it defects on the third move. After the third move, - its choice is determined from the 3 preceding outcomes in the following manner. - Let A be the sum formed by counting the other's defection as 2 points and one's - own as 1 point, and giving weights of 16, 4, and 1 to the preceding three - moves in chronological order. The choice can be described as defecting only - when A equals 1, 6, 7, 17, 22, 23, 26, 29, 30, 31, 33, 38, 39, 45, 49, 54, - 55, 58, or 61. Thus if all three preceding moves are mutual defection, - A = 63 and the rule cooperates. This rule was designed for use in laboratory - experiments as a stooge which had a memory and appeared to be trustworthy, - potentially cooperative, but not gullible. - - -- Axelrod, "Effective Choice in the Prisoner's Dilemma" + Submitted to Axelrod's first tournament by Rudy Nydegger. + + The program begins with tit for tat for the first three moves, except + that if it was the only one to cooperate on the first move and the only one to defect on the second move, it defects on the third move. After the third move, its choice is determined from the 3 preceding outcomes in the following manner. + + Let A be the sum formed by counting the other's defection as 2 points and one's own as 1 point, and giving weights of 16, 4, and 1 to the preceding three moves in chronological order. 
The choice can be described as defecting only when A equals 1, 6, 7, 17, 22, 23, 26, 29, 30, 31, 33, 38, 39, 45, 49, 54, 55, 58, or 61. + Thus if all three preceding moves are mutual defection, A = 63 and the rule cooperates. This rule was designed for use in laboratory experiments as a stooge which had a memory and appeared to be trustworthy, potentially cooperative, but not gullible. + + Names: + + - Nydegger: [Axelrod1980]_ """ name = "Nydegger" @@ -309,9 +331,15 @@ def strategy(self, opponent): class Shubik(Player): """ + Submitted to Axelrod's first tournament by Martin Shubik. + Plays like Tit-For-Tat with the following modification. After each retaliation, the number of rounds that Shubik retaliates increases by 1. + + Names: + + - Shubik: [Axelrod1980]_ """ name = 'Shubik' @@ -371,8 +399,15 @@ def reset(self): class Tullock(Player): """ + Submitted to Axelrod's first tournament by Gordon Tullock. + Cooperates for the first 11 rounds then randomly cooperates 10% less often - than the opponent has in previous rounds.""" + than the opponent has in previous rounds. + + Names: + + - Tullock: [Axelrod1980]_ + """ name = "Tullock" classifier = { @@ -423,7 +458,10 @@ class UnnamedStrategy(Player): score than the other player. Unfortunately, the complex process of adjustment frequently left the probability of cooperation in the 30% to 70% range, and therefore the rule appeared random to many other players. - -- Axelrod, "Effective Choice in the Prisoner's Dilemma" + + Names: + + - Unnamed Strategy: [Axelrod1980]_ Warning: This strategy is not identical to the original strategy (source unavailable) and was written based on published descriptions. diff --git a/axelrod/strategies/axelrod_second.py b/axelrod/strategies/axelrod_second.py index 8350369f6..90d1c668c 100644 --- a/axelrod/strategies/axelrod_second.py +++ b/axelrod/strategies/axelrod_second.py @@ -12,6 +12,12 @@ class Champion(Player): """ Strategy submitted to Axelrod's second tournament by Danny Champion. 
+ + This player cooperates on the first 10 moves and plays Tit for Tat for the next 15 more moves. After 25 moves, the program cooperates unless all the following are true: the other player defected on the previous move, the other player cooperated less than 60% and the random number between 0 and 1 is greater than the other player's cooperation rate. + + Names: + + - Champion: [Axelrod1980b]_ """ name = "Champion" @@ -48,6 +54,12 @@ def strategy(self, opponent): class Eatherley(Player): """ Strategy submitted to Axelrod's second tournament by Graham Eatherley. + + A player that keeps track of how many times in the game the other player defected. After the other player defects, it defects with a probability equal to the ratio of the other's total defections to the total moves to that point. + + Names: + + - Eatherley: [Axelrod1980b]_ """ name = "Eatherley" @@ -79,8 +91,11 @@ class Tester(Player): """ Submitted to Axelrod's second tournament by David Gladstein. - Defects on the first move and plays TFT if the opponent ever defects (after - one apology cooperation round). Otherwise alternate cooperation and defection. + Defects on the first move and plays Tit For Tat if the opponent ever defects (after one apology cooperation round). Otherwise alternate cooperation and defection. + + Names: + + - Tester: [Axelrod1980b]_ """ name = "Tester" diff --git a/axelrod/strategies/prober.py b/axelrod/strategies/prober.py index a6306ec7e..ccc2df3c8 100644 --- a/axelrod/strategies/prober.py +++ b/axelrod/strategies/prober.py @@ -102,6 +102,68 @@ def strategy(self, opponent): return D if opponent.history[-1:] == [D] else C +class Prober4(Player): + """ + Plays C, C, D, C, D, D, D, C, C, D, C, D, C, C, D, C, D, D, C, D initially. + Counts retaliating and provocative defections of the opponent. + If the absolute difference between the counts is smaller or equal to 2, + defects forever. + Otherwise plays C for the next 5 turns and TFT for the rest of the game. 
+ + Names: + + - prober4: [PRISON1998]_ + """ + + name = 'Prober 4' + classifier = { + 'stochastic': False, + 'memory_depth': float('inf'), + 'makes_use_of': set(), + 'long_run_time': False, + 'inspects_source': False, + 'manipulates_source': False, + 'manipulates_state': False + } + + @init_args + def __init__(self): + Player.__init__(self) + self.init_sequence = [ + C, C, D, C, D, D, D, C, C, D, C, D, C, C, D, C, D, D, C, D + ] + self.just_Ds = 0 + self.unjust_Ds = 0 + self.turned_defector = False + + def strategy(self, opponent): + if not self.history: + return self.init_sequence[0] + turn = len(self.history) + if turn < len(self.init_sequence): + if opponent.history[-1] == D: + if self.history[-1] == D: + self.just_Ds += 1 + if self.history[-1] == C: + self.unjust_Ds += 1 + return self.init_sequence[turn] + if turn == len(self.init_sequence): + diff_in_Ds = abs(self.just_Ds - self.unjust_Ds) + self.turned_defector = (diff_in_Ds <= 2) + if self.turned_defector: + return D + if not self.turned_defector: + if turn < len(self.init_sequence) + 5: + return C + return D if opponent.history[-1] == D else C + + def reset(self): + Player.reset(self) + self.just_Ds = 0 + self.unjust_Ds = 0 + self.turned_defector = False + + class HardProber(Player): """ Plays D, D, C, C initially. 
Defects forever if opponent cooperated in moves diff --git a/axelrod/tests/unit/test_prober.py b/axelrod/tests/unit/test_prober.py index 0f2cb7c67..c53fde0b1 100644 --- a/axelrod/tests/unit/test_prober.py +++ b/axelrod/tests/unit/test_prober.py @@ -99,6 +99,79 @@ def test_strategy(self): self.responses_test([D, C, C, D, C], [C, D, C, C], [C]) +class TestProber4(TestPlayer): + + name = "Prober 4" + player = axelrod.Prober4 + expected_classifier = { + 'stochastic': False, + 'memory_depth': float('inf'), + 'makes_use_of': set(), + 'long_run_time': False, + 'inspects_source': False, + 'manipulates_source': False, + 'manipulates_state': False + } + initial_sequence = [ + C, C, D, C, D, D, D, C, C, D, C, D, C, C, D, C, D, D, C, D + ] + + def test_initial_strategy(self): + """Starts by playing CCDCDDDCCDCDCCDCDDCD.""" + self.responses_test([], [], self.initial_sequence) + + def test_strategy(self): + # After playing the initial sequence defects forever + # if the absolute difference in the number of retaliating + # and provocative defections of the opponent is smaller or equal to 2 + + provocative_histories = [ + [C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C], + [C, D, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C], + [C, D, C, D, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C], + [C, C, D, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C], + [C, C, D, C, D, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C], + [D, D, D, D, D, D, D, D, D, D, D, D, D, D, D, D, D, D, D, D], + ] + + history1 = self.initial_sequence + responses = [D] * 10 + attrs = {'turned_defector': True} + for history2 in provocative_histories: + self.responses_test(history1, history2, responses, attrs=attrs) + + # Otherwise cooperates for 5 rounds + unprovocative_histories = [ + [C, C, D, C, D, D, D, C, C, D, C, D, C, C, D, C, D, D, C, D], + [D, D, C, D, C, C, C, D, D, C, D, C, D, D, C, D, C, C, D, C], + [C, C, D, C, D, D, C, C, C, C, C, C, C, C, C, C, C, C, C, C], + [C, C, D, C, D, D, C, 
C, D, C, C, C, C, C, C, D, D, D, C, C], + [C, C, C, C, D, D, C, C, D, C, C, D, D, C, D, C, D, C, C, C], + ] + + responses = [C] * 5 + attrs = {'turned_defector': False} + for history2 in unprovocative_histories: + self.responses_test(history1, history2, responses, attrs=attrs) + + # and plays like TFT afterwards + history1 += responses + history2 += responses + self.responses_test(history1, history2, [C], attrs=attrs) + + history1 += [C] + history2 += [D] + self.responses_test(history1, history2, [D], attrs=attrs) + + history1 += [D] + history2 += [C] + self.responses_test(history1, history2, [C], attrs=attrs) + + history1 += [C] + history2 += [D] + self.responses_test(history1, history2, [D], attrs=attrs) + + class TestHardProber(TestPlayer): name = "Hard Prober" diff --git a/axelrod/tests/unit/test_resultset.py b/axelrod/tests/unit/test_resultset.py index 2b3975ee4..8812afe18 100644 --- a/axelrod/tests/unit/test_resultset.py +++ b/axelrod/tests/unit/test_resultset.py @@ -5,6 +5,7 @@ from numpy import mean, std, nanmedian import csv +from collections import Counter from hypothesis import given, settings from axelrod.tests.property import tournaments, prob_end_tournaments @@ -82,7 +83,6 @@ def setUpClass(cls): [[17/5.0 for _ in range(3)], [9/5.0 for _ in range(3)], []] ] - norm_scores = cls.expected_normalised_scores cls.expected_score_diffs = [ [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], @@ -120,12 +120,40 @@ def setUpClass(cls): [0, 0, 0], ] + cls.expected_state_distribution = [ + [], [], [] + ] + cls.expected_normalised_cooperation = [ [0, mean([3 / 5.0 for _ in range(3)]), mean([3 / 5.0 for _ in range(3)])], [mean([3 / 5.0 for _ in range(3)]), 0, mean([1 / 5.0 for _ in range(3)])], [0, 0, 0], ] + cls.expected_state_distribution = [ + [Counter(), + Counter({('D', 'C'): 6, ('C', 'D'): 6, ('C', 'C'): 3}), + Counter({('C', 'D'): 9, ('D', 'D'): 6})], + [Counter({('D', 'C'): 6, ('C', 'D'): 6, ('C', 'C'): 3}), + Counter(), + Counter({('D', 'D'): 12, ('C', 'D'): 3})], + 
[Counter({('D', 'C'): 9, ('D', 'D'): 6}), + Counter({('D', 'D'): 12, ('D', 'C'): 3}), + Counter()] + ] + + cls.expected_normalised_state_distribution = [ + [Counter(), + Counter({('D', 'C'): 0.4, ('C', 'D'): 0.4, ('C', 'C'): 0.2}), + Counter({('C', 'D'): 0.6, ('D', 'D'): 0.4})], + [Counter({('D', 'C'): 0.4, ('C', 'D'): 0.4, ('C', 'C'): 0.2}), + Counter(), + Counter({('D', 'D'): 0.8, ('C', 'D'): 0.2})], + [Counter({('D', 'C'): 0.6, ('D', 'D'): 0.4}), + Counter({('D', 'D'): 0.8, ('D', 'C'): 0.2}), + Counter()] + ] + cls.expected_vengeful_cooperation = [[2 * element - 1 for element in row] for row in cls.expected_normalised_cooperation] @@ -195,12 +223,14 @@ def test_init_with_different_game(self): def test_with_progress_bar(self): rs = axelrod.ResultSet(self.players, self.interactions) self.assertTrue(rs.progress_bar) - self.assertEqual(rs.progress_bar.total, 10 + 2 * rs.nplayers) + self.assertEqual(rs.progress_bar.total, 11 + 2 * rs.nplayers) + self.assertEqual(rs.progress_bar.n, rs.progress_bar.total) rs = axelrod.ResultSet(self.players, self.interactions, progress_bar=True) self.assertTrue(rs.progress_bar) - self.assertEqual(rs.progress_bar.total, 10 + 2 * rs.nplayers) + self.assertEqual(rs.progress_bar.total, 11 + 2 * rs.nplayers) + self.assertEqual(rs.progress_bar.n, rs.progress_bar.total) def test_match_lengths(self): rs = axelrod.ResultSet(self.players, self.interactions, @@ -323,6 +353,22 @@ def test_normalised_cooperation(self): self.assertEqual(rs.normalised_cooperation, self.expected_normalised_cooperation) + def test_state_distribution(self): + rs = axelrod.ResultSet(self.players, self.interactions, + progress_bar=False) + self.assertIsInstance(rs.state_distribution, list) + self.assertEqual(len(rs.state_distribution), rs.nplayers) + self.assertEqual(rs.state_distribution, + self.expected_state_distribution) + + def test_state_normalised_distribution(self): + rs = axelrod.ResultSet(self.players, self.interactions, + progress_bar=False) + 
self.assertIsInstance(rs.normalised_state_distribution, list) + self.assertEqual(len(rs.normalised_state_distribution), rs.nplayers) + self.assertEqual(rs.normalised_state_distribution, + self.expected_normalised_state_distribution) + def test_vengeful_cooperation(self): rs = axelrod.ResultSet(self.players, self.interactions, progress_bar=False) @@ -376,7 +422,7 @@ def test_self_interaction_for_random_strategies(self): axelrod.seed(0) players = [s() for s in axelrod.demo_strategies] tournament = axelrod.Tournament(players, repetitions=2, turns=5) - results = tournament.play() + results = tournament.play(progress_bar=False) self.assertEqual(results.payoff_diffs_means[-1][-1], 1.0) def test_equality(self): @@ -386,7 +432,7 @@ def test_equality(self): players = [s() for s in axelrod.demo_strategies] tournament = axelrod.Tournament(players, repetitions=2, turns=5) - results = tournament.play() + results = tournament.play(progress_bar=False) self.assertNotEqual(results, rs_sets[0]) def test_summarise(self): @@ -413,6 +459,9 @@ def test_summarise(self): self.assertEqual([float(player.Wins) for player in sd], ranked_median_wins) + for player in sd: + self.assertEqual(player.CC_rate + player.CD_rate + player.DC_rate + player.DD_rate, 1) + def test_write_summary(self): rs = axelrod.ResultSet(self.players, self.interactions, progress_bar=False) @@ -422,22 +471,20 @@ def test_write_summary(self): csvreader = csv.reader(csvfile) for row in csvreader: ranked_names.append(row[1]) - self.assertEqual(len(row), 5) + self.assertEqual(len(row), 9) self.assertEqual(ranked_names[0], "Name") self.assertEqual(ranked_names[1:], rs.ranked_names) - - class TestResultSetFromFile(unittest.TestCase): filename = "test_outputs/test_results_from_file.csv" players = [axelrod.Cooperator(), axelrod.TitForTat(), axelrod.Defector()] tournament = axelrod.Tournament(players=players, turns=2, repetitions=3) - tournament.play(filename=filename) + tournament.play(filename=filename, progress_bar=False) - 
interactions = iu.read_interactions_from_file(filename) + interactions = iu.read_interactions_from_file(filename, progress_bar=False) def test_init(self): brs = axelrod.ResultSetFromFile(self.filename, progress_bar=False) @@ -448,7 +495,7 @@ def test_init(self): def test_init_with_different_game(self): game = axelrod.Game(p=-1, r=-1, s=-1, t=-1) brs = axelrod.ResultSetFromFile(self.filename, progress_bar=False, - game=game) + game=game) self.assertEqual(brs.game.RPST(), (-1, -1, -1, -1)) def test_init_with_progress_bar(self): @@ -460,7 +507,7 @@ def test_init_with_progress_bar(self): def test_init_with_num_interactions(self): """Just able to test that no error occurs""" - brs = axelrod.ResultSetFromFile(self.filename, progress_bar=True, + brs = axelrod.ResultSetFromFile(self.filename, progress_bar=False, num_interactions=18) self.assertEqual(brs.nplayers, len(self.players)) self.assertEqual(brs.repetitions, 3) @@ -468,7 +515,7 @@ def test_init_with_num_interactions(self): def test_init_with_players_repetitions(self): """Just able to test that no error occurs""" - brs = axelrod.ResultSetFromFile(self.filename, progress_bar=True, + brs = axelrod.ResultSetFromFile(self.filename, progress_bar=False, num_interactions=18, repetitions=3, players=[str(p) for p in self.players]) self.assertEqual(brs.nplayers, len(self.players)) @@ -498,7 +545,8 @@ def test_equality_with_round_robin(self, tournament): tournament.play(filename=filename, progress_bar=False, build_results=False) brs = axelrod.ResultSetFromFile(filename, progress_bar=False) - interactions = iu.read_interactions_from_file(filename) + interactions = iu.read_interactions_from_file(filename, + progress_bar=False) rs = axelrod.ResultSet(tournament.players, interactions, progress_bar=False) @@ -521,7 +569,8 @@ def test_equality_with_prob_end(self, tournament): tournament.play(filename=filename, progress_bar=False, build_results=False) brs = axelrod.ResultSetFromFile(filename, progress_bar=False) - interactions = 
iu.read_interactions_from_file(filename) + interactions = iu.read_interactions_from_file(filename, + progress_bar=False) rs = axelrod.ResultSet(tournament.players, interactions, progress_bar=False) @@ -717,7 +766,6 @@ def setUpClass(cls): [[17/5.0 for _ in range(3)], [], []] ] - norm_scores = cls.expected_normalised_scores cls.expected_score_diffs = [ [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], @@ -794,8 +842,25 @@ def setUpClass(cls): 0.5488212999484519 ] - cls.expected_csv = ( - 'Defector,Tit For Tat,Alternator\n3.4,2.6,1.5\n3.4,2.6,1.5\n3.4,2.6,1.5\n') + cls.expected_state_distribution = [ + [Counter(), + Counter({('C', 'C'): 3, ('C', 'D'): 6, ('D', 'C'): 6}), + Counter({('C', 'D'): 9, ('D', 'D'): 6})], + [Counter({('C', 'C'): 3, ('C', 'D'): 6, ('D', 'C'): 6}), + Counter(), + Counter()], + [Counter({('D', 'C'): 9, ('D', 'D'): 6}), Counter(), Counter()] + ] + + cls.expected_normalised_state_distribution = [ + [Counter(), + Counter({('C', 'C'): 0.2, ('C', 'D'): 0.4, ('D', 'C'): 0.4}), + Counter({('C', 'D'): 0.6, ('D', 'D'): 0.4})], + [Counter({('C', 'C'): 0.2, ('C', 'D'): 0.4, ('D', 'C'): 0.4}), + Counter(), + Counter()], + [Counter({('D', 'C'): 0.6, ('D', 'D'): 0.4}), Counter(), Counter()] + ] def test_match_lengths(self): """ @@ -906,7 +971,6 @@ def setUpClass(cls): [[], [], [0 for _ in range(3)], []] ] - norm_scores = cls.expected_normalised_scores cls.expected_score_diffs = [ [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], @@ -1000,8 +1064,31 @@ def setUpClass(cls): 0.1633132292825755 ] - cls.expected_csv = ( - "Defector,Alternator,Tit For Tat,Cooperator\n5.0,2.6,2.6,0.0\n5.0,2.6,2.6,0.0\n5.0,2.6,2.6,0.0\n") + cls.expected_state_distribution = [ + [Counter(), + Counter({('C', 'C'): 3, ('C', 'D'): 6, ('D', 'C'): 6}), + Counter(), + Counter()], + [Counter({('C', 'C'): 3, ('C', 'D'): 6, ('D', 'C'): 6}), + Counter(), + Counter(), + Counter()], + [Counter(), Counter(), Counter(), Counter({('D', 'C'): 15})], + [Counter(), Counter(), Counter({('C', 'D'): 15}), Counter()] + ] + + 
cls.expected_normalised_state_distribution = [ + [Counter(), + Counter({('C', 'C'): 0.2, ('C', 'D'): 0.4, ('D', 'C'): 0.4}), + Counter(), + Counter()], + [Counter({('C', 'C'): 0.2, ('C', 'D'): 0.4, ('D', 'C'): 0.4}), + Counter(), + Counter(), + Counter()], + [Counter(), Counter(), Counter(), Counter({('D', 'C'): 1.0})], + [Counter(), Counter(), Counter({('C', 'D'): 1.0}), Counter()] + ] class TestResultSetSpatialStructureThree(TestResultSetSpatialStructure): @@ -1067,7 +1154,6 @@ def setUpClass(cls): [[], [], [], [15 /5.0 for _ in range(3)]] ] - norm_scores = cls.expected_normalised_scores cls.expected_score_diffs = [ [[0.0 for _ in range(3)] for _ in range(4) ] for _ in range(4) ] @@ -1131,9 +1217,51 @@ def setUpClass(cls): 0.3985944056208427 ] - cls.expected_csv = ( - 'Alternator,Tit For Tat,Defector,Cooperator\nnan,nan,nan,nan\nnan,nan,nan,nan\nnan,nan,nan,nan\n') + cls.expected_state_distribution = [ + [Counter(), Counter(), Counter(), Counter()], + [Counter(), Counter(), Counter(), Counter()], + [Counter(), Counter(), Counter(), Counter()], + [Counter(), Counter(), Counter(), Counter()] + ] + + cls.expected_normalised_state_distribution = [ + [Counter(), Counter(), Counter(), Counter()], + [Counter(), Counter(), Counter(), Counter()], + [Counter(), Counter(), Counter(), Counter()], + [Counter(), Counter(), Counter(), Counter()] + ] + def test_equality(self): """Overwriting for this particular case""" pass + + def test_summarise(self): + """Overwriting for this particular case""" + rs = axelrod.ResultSet(self.players, self.interactions, + progress_bar=False) + sd = rs.summarise() + + for player in sd: + self.assertEqual(player.CC_rate, 0) + self.assertEqual(player.CD_rate, 0) + self.assertEqual(player.DC_rate, 0) + self.assertEqual(player.DD_rate, 0) + + +class TestSummary(unittest.TestCase): + """Separate test to check that summary always builds without failures""" + @given(tournament=tournaments(max_size=5, + max_turns=5, + max_repetitions=3)) + 
@settings(max_examples=50, timeout=0) + def test_summarise_without_failure(self, tournament): + results = tournament.play(progress_bar=False) + sd = results.summarise() + self.assertIsInstance(sd, list) + + for player in sd: + # round for numerical error + total_rate = round(player.CC_rate + player.CD_rate + + player.DC_rate + player.DD_rate, 3) + self.assertTrue(total_rate in [0, 1]) diff --git a/axelrod/tests/unit/test_tournament.py b/axelrod/tests/unit/test_tournament.py index 2ae78303d..e67de4c04 100644 --- a/axelrod/tests/unit/test_tournament.py +++ b/axelrod/tests/unit/test_tournament.py @@ -160,7 +160,7 @@ def test_no_progress_bar_play(self): results = tournament.play(progress_bar=False, build_results=False, filename=self.filename) self.assertIsNone(results) - results = axelrod.ResultSetFromFile(self.filename) + results = axelrod.ResultSetFromFile(self.filename, progress_bar=False) self.assertIsInstance(results, axelrod.ResultSet) self.assertRaises(AttributeError, call_progress_bar) @@ -176,10 +176,14 @@ def test_progress_bar_play(self): results = tournament.play() self.assertIsInstance(results, axelrod.ResultSet) self.assertEqual(tournament.progress_bar.total, 15) + self.assertEqual(tournament.progress_bar.total, + tournament.progress_bar.n) results = tournament.play(progress_bar=True) self.assertIsInstance(results, axelrod.ResultSet) self.assertEqual(tournament.progress_bar.total, 15) + self.assertEqual(tournament.progress_bar.total, + tournament.progress_bar.n) # Test without build results results = tournament.play(progress_bar=True, build_results=False, @@ -188,6 +192,8 @@ def test_progress_bar_play(self): results = axelrod.ResultSetFromFile(self.filename) self.assertIsInstance(results, axelrod.ResultSet) self.assertEqual(tournament.progress_bar.total, 15) + self.assertEqual(tournament.progress_bar.total, + tournament.progress_bar.n) @unittest.skipIf(axelrod.on_windows, "Parallel processing not supported on Windows") @@ -413,7 +419,7 @@ def 
test_no_build_result_set(self): self.assertIsNone(results) # Checking that results were written properly - results = axelrod.ResultSetFromFile(self.filename) + results = axelrod.ResultSetFromFile(self.filename, progress_bar=False) self.assertIsInstance(results, axelrod.ResultSet) @given(turns=integers(min_value=1, max_value=200)) diff --git a/axelrod/version.py b/axelrod/version.py index b518f6eed..9a34ccc9f 100644 --- a/axelrod/version.py +++ b/axelrod/version.py @@ -1 +1 @@ -__version__ = "1.12.0" +__version__ = "1.13.0" diff --git a/docs/reference/bibliography.rst b/docs/reference/bibliography.rst index 0ee9e3e58..55eb55cff 100644 --- a/docs/reference/bibliography.rst +++ b/docs/reference/bibliography.rst @@ -7,6 +7,7 @@ This is a collection of various bibliographic items referenced in the documentation. .. [Axelrod1980] Axelrod, R. (1980). Effective Choice in the Prisoner’s Dilemma. Journal of Conflict Resolution, 24(1), 3–25. +.. [Axelrod1980b] Axelrod, R. (1980). More Effective Choice in the Prisoner’s Dilemma. Journal of Conflict Resolution, 24(3), 379-403. .. [Axelrod1984] The Evolution of Cooperation. Basic Books. ISBN 0-465-02121-2. .. [Axelrod1995] Wu, J. and Axelrod, R. (1995). How to cope with noise in the Iterated prisoner’s dilemma, Journal of Conflict Resolution, 39(1), pp. 183–189. doi: 10.1177/0022002795039001008. .. [Banks1980] Banks, J. S., & Sundaram, R. K. (1990). Repeated games, finite automata, and complexity. Games and Economic Behavior, 2(2), 97–117. http://doi.org/10.1016/0899-8256(90)90024-O diff --git a/docs/tutorials/getting_started/tournament_results.rst b/docs/tutorials/getting_started/tournament_results.rst index 8fb2c52dd..50aa8e881 100644 --- a/docs/tutorials/getting_started/tournament_results.rst +++ b/docs/tutorials/getting_started/tournament_results.rst @@ -20,6 +20,10 @@ This tutorial will show you how to access the various results of a tournament: - Payoff difference means: the mean score differences. 
- Cooperation counts: the number of times each player cooperated. - Normalised cooperation: cooperation count per turn. +- Normalised cooperation: cooperation count per turn. +- State distribution: the count of each type of state of a match +- Normalised state distribution: the normalised count of each type of state of a + match - Cooperation rating: cooperation rating of each player - Vengeful cooperation: a morality metric from the literature (see :ref:`morality-metrics`). @@ -210,6 +214,59 @@ We see that :code:`Cooperator` for all the rounds (as expected):: >>> results.normalised_cooperation[0] [1.0, 1.0, 1.0, 1.0] +State distribution counts +------------------------- + +This gives a total state count against each opponent. A state corresponds to 1 +turn of a match and can be one of :code:`('C', 'C'), ('C', 'D'), ('D', 'C'), +('D', 'D')` where the first element is the action of the player in question and +the second the action of the opponent:: + + >>> pprint.pprint(results.state_distribution) + [[Counter(), + Counter({('C', 'D'): 30}), + Counter({('C', 'C'): 30}), + Counter({('C', 'C'): 30})], + [Counter({('D', 'C'): 30}), + Counter(), + Counter({('D', 'D'): 27, ('D', 'C'): 3}), + Counter({('D', 'D'): 27, ('D', 'C'): 3})], + [Counter({('C', 'C'): 30}), + Counter({('D', 'D'): 27, ('C', 'D'): 3}), + Counter(), + Counter({('C', 'C'): 30})], + [Counter({('C', 'C'): 30}), + Counter({('D', 'D'): 27, ('C', 'D'): 3}), + Counter({('C', 'C'): 30}), + Counter()]] + +Normalised state distribution +----------------------------- + +This gives the average rate state distribution against each opponent. 
+A state corresponds to 1 +turn of a match and can be one of :code:`('C', 'C'), ('C', 'D'), ('D', 'C'), +('D', 'D')` where the first element is the action of the player in question and +the second the action of the opponent:: + + >>> pprint.pprint(results.normalised_state_distribution) + [[Counter(), + Counter({('C', 'D'): 1.0}), + Counter({('C', 'C'): 1.0}), + Counter({('C', 'C'): 1.0})], + [Counter({('D', 'C'): 1.0}), + Counter(), + Counter({('D', 'D'): 0.9, ('D', 'C'): 0.1}), + Counter({('D', 'D'): 0.9, ('D', 'C'): 0.1})], + [Counter({('C', 'C'): 1.0}), + Counter({('D', 'D'): 0.9, ('C', 'D'): 0.1}), + Counter(), + Counter({('C', 'C'): 1.0})], + [Counter({('C', 'C'): 1.0}), + Counter({('D', 'D'): 0.9, ('C', 'D'): 0.1}), + Counter({('C', 'C'): 1.0}), + Counter()]] + Morality Metrics ---------------- @@ -242,10 +299,10 @@ that summarises the results of the tournament:: >>> summary = results.summarise() >>> pprint.pprint(summary) - [Player(Rank=0, Name='Defector', Median_score=2.6..., Cooperation_rating=0.0, Wins=3.0), - Player(Rank=1, Name='Tit For Tat', Median_score=2.3..., Cooperation_rating=0.7, Wins=0.0), - Player(Rank=2, Name='Grudger', Median_score=2.3..., Cooperation_rating=0.7, Wins=0.0), - Player(Rank=3, Name='Cooperator', Median_score=2.0..., Cooperation_rating=1.0, Wins=0.0)] + [Player(Rank=0, Name='Defector', Median_score=2.6..., Cooperation_rating=0.0, Wins=3.0, CC_rate=...), + Player(Rank=1, Name='Tit For Tat', Median_score=2.3..., Cooperation_rating=0.7, Wins=0.0, CC_rate=...), + Player(Rank=2, Name='Grudger', Median_score=2.3..., Cooperation_rating=0.7, Wins=0.0, CC_rate=...), + Player(Rank=3, Name='Cooperator', Median_score=2.0..., Cooperation_rating=1.0, Wins=0.0, CC_rate=...)] It is also possible to write this data directly to a csv file using the `write_summary` method:: @@ -256,8 +313,8 @@ It is also possible to write this data directly to a csv file using the ... csvreader = csv.reader(outfile) ... for row in csvreader: ... 
print(row) - ['Rank', 'Name', 'Median_score', 'Cooperation_rating', 'Wins'] - ['0', 'Defector', '2.6...', '0.0', '3.0'] - ['1', 'Tit For Tat', '2.3...', '0.7', '0.0'] - ['2', 'Grudger', '2.3...', '0.7', '0.0'] - ['3', 'Cooperator', '2.0...', '1.0', '0.0'] + ['Rank', 'Name', 'Median_score', 'Cooperation_rating', 'Wins', 'CC_rate', 'CD_rate', 'DC_rate', 'DD_rate'] + ['0', 'Defector', '2.6...', '0.0', '3.0', '0.0', '0.0', '0.4...', '0.6...'] + ['1', 'Tit For Tat', '2.3...', '0.7', '0.0', '0.66...', '0.03...', '0.0', '0.3...'] + ['2', 'Grudger', '2.3...', '0.7', '0.0', '0.66...', '0.03...', '0.0', '0.3...'] + ['3', 'Cooperator', '2.0...', '1.0', '0.0', '0.66...', '0.33...', '0.0', '0.0']