PyForce - A simple reinforcement learning library

🧐 About

A simple and modular reinforcement learning library based on PyTorch.

🏁 Getting Started

pip install pyforce-rl

🎈 Usage

from pyforce.env import DictEnv, ActionSpaceScaler, TorchEnv
from pyforce.nn.observation import ObservationProcessor
from pyforce.nn.hidden import HiddenLayers
from pyforce.nn.action import ActionMapper
from pyforce.agents import PPOAgent
import gym
import torch

device="cuda:0" if torch.cuda.is_available() else "cpu"

env=gym.make("LunarLanderContinuous-v2")
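# wrap the env for pyforce: dict observations, scaled continuous actions,
# and torch tensors moved to the chosen device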
env=DictEnv(env)
env=ActionSpaceScaler(env)
env=TorchEnv(env).to(device)

observation_processor=ObservationProcessor(env)
hidden_layers=HiddenLayers(observation_processor.n_output)
action_mapper=ActionMapper(env,hidden_layers.n_output)
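# assemble the PPO agent from the observation encoder, hidden layers and action head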

agent=PPOAgent(
	observation_processor,
	hidden_layers,
	action_mapper,
	save_path="./evals/ppo_example",
	value_lr=5e-4,
	policy_lr=5e-4
).to(device)

agent.train(
	env,
	episodes=1000,
	train_freq=2048,
	eval_freq=50,
	render=True,
	batch_size=128,
	gamma=.99,
	tau=.95,
	clip=.2,
	n_steps=32,
	entropy_coef=.01
)
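
train() already interleaves evaluation every eval_freq episodes. If you want to roll the trained policy out manually, a minimal sketch along these lines should work; it assumes the classic gym reset()/step() return values survive the TorchEnv wrapper and that PPOAgent implements the get_action(state, eval, args) method from the BaseAgent interface shown below:

# Manual rollout sketch (assumptions: classic gym reset()/step() signature
# preserved by TorchEnv, get_action as in the BaseAgent interface).
state = env.reset()
done = False
episode_return = 0.0

while not done:
    with torch.no_grad():
        action, _ = agent.get_action(state, eval=True, args={})
    state, reward, done, info = env.step(action)
    episode_return += float(reward)

print("episode return:", episode_return)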

🚀 Implement custom RL Agents

from pyforce.agents.base import BaseAgent
from torch import nn

class MyAgent(BaseAgent):

    def __init__(self, observationprocessor, hiddenlayers, actionmapper, save_path=None):
        super().__init__(save_path)

        self.policy_net = nn.Sequential(observationprocessor, hiddenlayers, actionmapper)
        self.value_net = ...

    def forward(self, state):
        return self.policy_net(state)

    def get_action(self, state, eval, args):
        # return the action plus any additional information to be stored in the memory
        return self(state).sample(), {}

    def after_step(self, done, eval, args):
        if not eval:
            if self.env_steps % args["train_freq"] == 0 and len(self.memory) > 0:
                ...  # do training

        if done and eval:
            ...  # do evaluation
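
A custom agent plugs into the same building blocks as the PPO example. The wiring below is a sketch, not part of the library's documented API: it assumes MyAgent is a torch Module (so .to(device) works, as it does for PPOAgent) and that BaseAgent.train forwards extra keyword arguments such as train_freq to after_step via args.

# Hypothetical wiring of MyAgent, reusing env, observation_processor,
# hidden_layers and action_mapper from the usage example above.
agent = MyAgent(
    observation_processor,
    hidden_layers,
    action_mapper,
    save_path="./evals/my_agent",  # hypothetical path
).to(device)

# Assumption: extra kwargs (train_freq here) reach after_step through `args`.
agent.train(env, episodes=1000, train_freq=2048, eval_freq=50)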

⛏️ Built Using

PyTorch
OpenAI Gym