/**
 * Creates an {@link SGWorldShell} rooted in a new world over the given domain,
 * reading commands from standard input and writing output to standard output.
 * The new world uses a null joint reward function and null termination.
 * @param domain the SGDomain
 * @param s the state in which the world will be rooted
 */
public SGWorldShell(SGDomain domain, State s){
	this(domain, System.in, System.out,
			new World(domain, new NullJointRewardFunction(), new NullTermination(), s));
}
/**
 * Initializes the data members for the visual explorer. A fresh {@link World}
 * with a null joint reward function and null termination is created around the
 * provided base state and handed to {@code init}.
 * @param domain the stochastic game domain to be explored
 * @param painter the 2D visualizer for states
 * @param baseState the initial state from which to explore
 * @param w the width of the state visualizer
 * @param h the height of the state visualizer
 */
public SGVisualExplorer(SGDomain domain, Visualizer painter, State baseState, int w, int h){
	World world = new World(domain, new NullJointRewardFunction(), new NullTermination(), baseState);
	this.init(domain, world, painter, w, h);
}
/**
 * Example entry point: constructs an instance of Prisoner's Dilemma and begins
 * playing it interactively through an {@link SGWorldShell}.
 * @param args command line args (not used by this example)
 */
public static void main(String [] args){

	SingleStageNormalFormGame pd = SingleStageNormalFormGame.getPrisonersDilemma();
	SGDomain domain = (SGDomain)pd.generateDomain();
	JointRewardFunction rewards = pd.getJointRewardFunction();

	// a two-player normal form game starts in a trivial 2-agent NFGameState
	State initial = (State)new NFGameState(2);
	World world = new World(domain, rewards, new NullTermination(), initial);

	new SGWorldShell(domain, world).start();
}
/** * Creates a world instance for this game in which the provided agents join in the order they are passed. This object * uses the provided domain instance generated from this object instead of generating a new one. * @param domain the SGDomain instance * @param agents the agents to join the created world. * @return a world instance with the provided agents having already joined. */ public World createRepeatedGameWorld(SGDomain domain, SGAgent...agents){ //grab the joint reward function from our bimatrix game in the more general BURLAP joint reward function interface JointRewardFunction jr = this.getJointRewardFunction(); //game repeats forever unless manually stopped after T times. TerminalFunction tf = new NullTermination(); //set up the initial state generator for the world, which for a bimatrix game is trivial StateGenerator sg = new NFGameState(agents.length); //create a world to synchronize the actions of agents in this domain and record results World w = new World(domain, jr, tf, sg); for(SGAgent a : agents){ w.join(a); } return w; }
/**
 * Builds the blocks-world OO-SA domain: registers the block state class and the
 * stack/unstack action types, then installs a {@link FactoredModel} over a
 * {@link BWModel} state model. Propositional functions are added last.
 * @return the generated {@link OOSADomain}
 */
@Override
public OOSADomain generateDomain() {

	OOSADomain d = new OOSADomain();

	d.addStateClass(CLASS_BLOCK, BlocksWorldBlock.class);

	d.addActionType(new StackActionType(ACTION_STACK))
			.addActionType(new UnstackActionType(ACTION_UNSTACK));

	// fall back to no-op reward/termination when none were configured on this generator
	RewardFunction reward = this.rf != null ? this.rf : new NullRewardFunction();
	TerminalFunction terminal = this.tf != null ? this.tf : new NullTermination();

	d.setModel(new FactoredModel(new BWModel(), reward, terminal));

	OODomain.Helper.addPfsToDomain(d, this.generatePfs());

	return d;
}
/**
 * Builds the grid-world OO-SA domain: registers the agent and location state
 * classes, installs a {@link FactoredModel} over a {@link GridWorldModel} built
 * from this generator's map and transition dynamics, and adds the four
 * cardinal movement action types plus propositional functions.
 * @return the generated {@link OOSADomain}
 */
@Override
public OOSADomain generateDomain() {

	OOSADomain d = new OOSADomain();

	d.addStateClass(CLASS_AGENT, GridAgent.class)
			.addStateClass(CLASS_LOCATION, GridLocation.class);

	// state transition model built from the wall map and the configured move dynamics
	GridWorldModel stateModel = new GridWorldModel(this.getMap(), getTransitionDynamics());

	// default to uniform action cost and no terminal states when unconfigured
	RewardFunction reward = this.rf != null ? this.rf : new UniformCostRF();
	TerminalFunction terminal = this.tf != null ? this.tf : new NullTermination();

	d.setModel(new FactoredModel(stateModel, reward, terminal));

	d.addActionTypes(
			new UniversalActionType(ACTION_NORTH),
			new UniversalActionType(ACTION_SOUTH),
			new UniversalActionType(ACTION_EAST),
			new UniversalActionType(ACTION_WEST));

	OODomain.Helper.addPfsToDomain(d, this.generatePfs());

	return d;
}