public static void QLCoCoTest(){ GridGame gridGame = new GridGame(); final OOSGDomain domain = gridGame.generateDomain(); final State s = GridGame.getPrisonersDilemmaInitialState(); JointRewardFunction rf = new GridGame.GGJointRewardFunction(domain, -1, 100, false); TerminalFunction tf = new GridGame.GGTerminalFunction(domain); SGAgentType at = GridGame.getStandardGridGameAgentType(domain);
public static void saInterface(){ GridGame gridGame = new GridGame(); final OOSGDomain domain = gridGame.generateDomain(); final State s = GridGame.getSimpleGameInitialState(); JointRewardFunction rf = new GridGame.GGJointRewardFunction(domain, -1, 100, false); TerminalFunction tf = new GridGame.GGTerminalFunction(domain); SGAgentType at = GridGame.getStandardGridGameAgentType(domain);
/**
 * Example entry point: runs a 20-step game in the turkey grid world with two
 * uniformly random agents, then round-trips the resulting {@link GameEpisode}
 * through its string serialization and prints the results.
 * @param args unused command line arguments
 */
public static void main(String[] args) {
    GridGame gg = new GridGame();
    OOSGDomain domain = gg.generateDomain();
    State s = GridGame.getTurkeyInitialState();

    JointRewardFunction jr = new GridGame.GGJointRewardFunction(domain);
    TerminalFunction tf = new GridGame.GGTerminalFunction(domain);

    World world = new World(domain, jr, tf, new ConstantStateGenerator(s));
    // silence the world's debug printing
    DPrint.toggleCode(world.getDebugId(), false);

    // two agents that select actions uniformly at random
    SGAgent ragent1 = new RandomSGAgent();
    SGAgent ragent2 = new RandomSGAgent();
    world.join(ragent1);
    world.join(ragent2);

    // run for at most 20 time steps
    GameEpisode ga = world.runGame(20);
    System.out.println(ga.maxTimeStep());

    // serialize the episode and parse it back to verify the round trip
    String serialized = ga.serialize();
    System.out.println(serialized);
    GameEpisode read = GameEpisode.parse(serialized);
    System.out.println(read.maxTimeStep());
    System.out.println(read.state(0).toString());
}
/**
 * Returns the initial state for a classic coordination game, where the agent's personal goals are on opposite sides.
 * (Note: the method name's spelling is preserved for API compatibility.)
 * @return the coordination game initial state
 */
public static State getCorrdinationGameInitialState(){
    // agents start in opposite bottom corners of a 3x3 grid
    GGAgent firstAgent = new GGAgent(0, 0, 0, "agent0");
    GGAgent secondAgent = new GGAgent(2, 0, 1, "agent1");
    GGGoal leftGoal = new GGGoal(0, 2, 2, "g0");
    GGGoal rightGoal = new GGGoal(2, 2, 1, "g1");
    GenericOOState state = new GenericOOState(firstAgent, secondAgent, leftGoal, rightGoal);
    setBoundaryWalls(state, 3, 3);
    return state;
}
/**
 * Builds the grid game stochastic-game domain: registers the agent, goal, and
 * wall state classes, the five movement actions, the domain's propositional
 * functions, and the standard grid game joint action mechanics.
 * @return the generated {@link OOSGDomain}
 */
@Override
public OOSGDomain generateDomain() {
    OOSGDomain domain = new OOSGDomain();

    // register the OO-MDP object classes that make up a grid game state
    domain.addStateClass(CLASS_AGENT, GGAgent.class);
    domain.addStateClass(CLASS_GOAL, GGGoal.class);
    domain.addStateClass(CLASS_DIM_H_WALL, GGWall.GGHorizontalWall.class);
    domain.addStateClass(CLASS_DIM_V_WALL, GGWall.GGVerticalWall.class);

    // each agent can move in the four cardinal directions or do nothing
    String[] actionNames = {ACTION_NORTH, ACTION_SOUTH, ACTION_EAST, ACTION_WEST, ACTION_NOOP};
    for (String actionName : actionNames) {
        domain.addActionType(new UniversalActionType(actionName));
    }

    OODomain.Helper.addPfsToDomain(domain, this.generatePFs());
    domain.setJointActionModel(new GridGameStandardMechanics(domain, this.semiWallProb));

    return domain;
}
/**
 * Returns the initial state for a simple game in which both players can win without interfering with one another.
 * @return the simple game initial state
 */
public static State getSimpleGameInitialState(){
    // each agent's goal is directly above its own start, so paths never cross
    GGAgent firstAgent = new GGAgent(0, 0, 0, "agent0");
    GGAgent secondAgent = new GGAgent(2, 0, 1, "agent1");
    GGGoal leftGoal = new GGGoal(0, 2, 1, "g0");
    GGGoal rightGoal = new GGGoal(2, 2, 2, "g1");
    GenericOOState state = new GenericOOState(firstAgent, secondAgent, leftGoal, rightGoal);
    setBoundaryWalls(state, 3, 3);
    return state;
}
public static void VICorrelatedTest(){ GridGame gridGame = new GridGame(); final OOSGDomain domain = gridGame.generateDomain(); final HashableStateFactory hashingFactory = new SimpleHashableStateFactory(); final State s = GridGame.getPrisonersDilemmaInitialState(); JointRewardFunction rf = new GridGame.GGJointRewardFunction(domain, -1, 100, false); TerminalFunction tf = new GridGame.GGTerminalFunction(domain); SGAgentType at = GridGame.getStandardGridGameAgentType(domain); MAValueIteration vi = new MAValueIteration(domain, rf, tf, 0.99, hashingFactory, 0., new CorrelatedQ(CorrelatedEquilibriumSolver.CorrelatedEquilibriumObjective.UTILITARIAN), 0.00015, 50); World w = new World(domain, rf, tf, s); //for correlated Q, use a correlated equilibrium policy joint policy ECorrelatedQJointPolicy jp0 = new ECorrelatedQJointPolicy(CorrelatedEquilibriumSolver.CorrelatedEquilibriumObjective.UTILITARIAN, 0.); MultiAgentDPPlanningAgent a0 = new MultiAgentDPPlanningAgent(domain, vi, new PolicyFromJointPolicy(0, jp0, true), "agent0", at); MultiAgentDPPlanningAgent a1 = new MultiAgentDPPlanningAgent(domain, vi, new PolicyFromJointPolicy(1, jp0, true), "agent1", at); w.join(a0); w.join(a1); GameEpisode ga = null; List<GameEpisode> games = new ArrayList<GameEpisode>(); for(int i = 0; i < 10; i++){ ga = w.runGame(); games.add(ga); } Visualizer v = GGVisualizer.getVisualizer(9, 9); new GameSequenceVisualizer(v, domain, games); }
GridGame gg = new GridGame(); OOSGDomain d = gg.generateDomain(); State s = GridGame.getTurkeyInitialState();
/**
 * Returns the initial state for Friend Foe game.
 * @return the initial state for Friend Foe
 */
public static State getFriendFoeInitialState(){
    // single-row 8x1 corridor with both agents between the two goals
    GGAgent firstAgent = new GGAgent(3, 0, 0, "agent0");
    GGAgent secondAgent = new GGAgent(6, 0, 1, "agent1");
    GGGoal leftGoal = new GGGoal(0, 0, 1, "g0");
    GGGoal middleGoal = new GGGoal(4, 0, 0, "g1");
    GenericOOState state = new GenericOOState(firstAgent, secondAgent, leftGoal, middleGoal);
    setBoundaryWalls(state, 8, 1);
    return state;
}
public static void VICoCoTest(){ GridGame gridGame = new GridGame(); final OOSGDomain domain = gridGame.generateDomain(); final State s = GridGame.getPrisonersDilemmaInitialState(); SGAgentType at = GridGame.getStandardGridGameAgentType(domain);
/**
 * Returns the initial state for the Incredible game (a game in which player 0 can give an incredible threat).
 * @return the initial state for the Incredible game.
 */
public static State getIncredibleInitialState(){
    // 4x1 corridor: both goals lie to the left of both agents
    GGAgent firstAgent = new GGAgent(2, 0, 0, "agent0");
    GGAgent secondAgent = new GGAgent(3, 0, 1, "agent1");
    GGGoal leftGoal = new GGGoal(0, 0, 1, "g0");
    GGGoal rightGoal = new GGGoal(1, 0, 2, "g1");
    GenericOOState state = new GenericOOState(firstAgent, secondAgent, leftGoal, rightGoal);
    setBoundaryWalls(state, 4, 1);
    return state;
}
/**
 * Returns the initial state for a classic prisoner's dilemma formulated in a Grid Game.
 * @return the grid game prisoner's dilemma initial state
 */
public static State getPrisonersDilemmaInitialState(){
    // 9x1 corridor: a goal at each end plus one between the two agents
    GGAgent firstAgent = new GGAgent(3, 0, 0, "agent0");
    GGAgent secondAgent = new GGAgent(5, 0, 1, "agent1");
    GGGoal leftGoal = new GGGoal(0, 0, 1, "g0");
    GGGoal middleGoal = new GGGoal(4, 0, 0, "g1");
    GGGoal rightGoal = new GGGoal(8, 0, 2, "g2");
    GenericOOState state = new GenericOOState(firstAgent, secondAgent, leftGoal, middleGoal, rightGoal);
    setBoundaryWalls(state, 9, 1);
    return state;
}
/**
 * Returns the initial state for the "turkey" grid game variant: a 3x4 grid with
 * three goals along the top row and two interior horizontal walls placed in
 * front of the agents' starting positions.
 * @return the turkey game initial state
 */
public static State getTurkeyInitialState(){
    GGAgent firstAgent = new GGAgent(0, 0, 0, "agent0");
    GGAgent secondAgent = new GGAgent(2, 0, 1, "agent1");
    GGGoal leftGoal = new GGGoal(0, 3, 1, "g0");
    GGGoal middleGoal = new GGGoal(1, 2, 0, "g1");
    GGGoal rightGoal = new GGGoal(2, 3, 2, "g2");
    // NOTE(review): wall constructor argument semantics (extent/position/type)
    // are not visible here; values are reproduced exactly from the original.
    GGWall.GGHorizontalWall leftWall = new GGWall.GGHorizontalWall(0, 0, 1, 1, "w0");
    GGWall.GGHorizontalWall rightWall = new GGWall.GGHorizontalWall(2, 2, 1, 1, "w1");
    GenericOOState state = new GenericOOState(
            firstAgent, secondAgent, leftGoal, middleGoal, rightGoal, leftWall, rightWall);
    setBoundaryWalls(state, 3, 4);
    return state;
}