import os

from setuptools import find_packages, setup

with open(os.path.join("stable_baselines3", "version.txt")) as file_handler:
    __version__ = file_handler.read().strip()

long_description = """
# Stable Baselines3

Stable Baselines3 is a set of reliable implementations of reinforcement learning algorithms in PyTorch. It is the next major version of [Stable Baselines](https://github.com/hill-a/stable-baselines).

These algorithms will make it easier for the research community and industry to replicate, refine, and identify new ideas, and will create good baselines to build projects on top of. We expect these tools will be used as a base around which new ideas can be added, and as a tool for comparing a new approach against existing ones. We also hope that the simplicity of these tools will allow beginners to experiment with a more advanced toolset, without being buried in implementation details.

## Links

Repository:
https://github.com/DLR-RM/stable-baselines3

Blog post:
https://araffin.github.io/post/sb3/

Documentation:
https://stable-baselines3.readthedocs.io/en/master/

RL Baselines3 Zoo:
https://github.com/DLR-RM/rl-baselines3-zoo

SB3 Contrib:
https://github.com/Stable-Baselines-Team/stable-baselines3-contrib

## Quick example

Most of the library tries to follow a sklearn-like syntax for the Reinforcement Learning algorithms using Gym.

Here is a quick example of how to train and run PPO on a cartpole environment:
```python
import gym

from stable_baselines3 import PPO

env = gym.make("CartPole-v1")

model = PPO("MlpPolicy", env, verbose=1)
model.learn(total_timesteps=10_000)

vec_env = model.get_env()
obs = vec_env.reset()
for i in range(1000):
    action, _states = model.predict(obs, deterministic=True)
    obs, reward, done, info = vec_env.step(action)
    vec_env.render()
    # VecEnv resets automatically
    # if done:
    #   obs = vec_env.reset()
```
Or just train a model with a one-liner if [the environment is registered in Gym](https://www.gymlibrary.ml/content/environment_creation/) and if [the policy is registered](https://stable-baselines3.readthedocs.io/en/master/guide/custom_policy.html):

```python
from stable_baselines3 import PPO

model = PPO("MlpPolicy", "CartPole-v1").learn(10_000)
```

"""  # noqa:E501
# Atari Games download is sometimes problematic:
# https://github.com/Farama-Foundation/AutoROM/issues/39
# That's why we define extra packages without it.
extra_no_roms = [
    # For render
    "opencv-python",
    # Tensorboard support
    "tensorboard>=2.9.1",
    # Checking memory taken by replay buffer
    "psutil",
    # For progress bar callback
    "tqdm",
    "rich",
    # For atari games,
    "ale-py==0.7.4",
    "pillow",
]

extra_packages = extra_no_roms + [  # noqa: RUF005
    # For atari roms,
    "autorom[accept-rom-license]~=0.5.5",
]
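
# Usage sketch (not part of the build): these two lists back the "extra" and
# "extra_no_roms" keys in extras_require below, so with pip they would be
# installed as, e.g.:
#   pip install stable-baselines3[extra]          # full extras, incl. Atari ROMs
#   pip install stable-baselines3[extra_no_roms]  # same set, minus the ROM download
# The PyPI distribution name stable-baselines3 is assumed here.
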
setup(
    name="stable_baselines3",
    packages=[package for package in find_packages() if package.startswith("stable_baselines3")],
    package_data={"stable_baselines3": ["py.typed", "version.txt"]},
    install_requires=[
        "gym==0.21",  # Fixed version due to breaking changes in 0.22
        "numpy",
        "torch>=1.11",
        'typing_extensions>=4.0,<5; python_version < "3.8.0"',
        # For saving models
        "cloudpickle",
        # For reading logs
        "pandas",
        # Plotting learning curves
        "matplotlib",
        # gym not compatible with importlib-metadata>5.0
        "importlib-metadata~=4.13",
    ],
    extras_require={
        "tests": [
            # Run tests and coverage
            "pytest",
            "pytest-cov",
            "pytest-env",
            "pytest-xdist",
            # Type check
            "pytype",
            "mypy",
            # Lint code (flake8 replacement)
            "ruff",
            # Sort imports
            "isort>=5.0",
            # Reformat
            "black",
            # For toy text Gym envs
            "scipy>=1.4.1",
        ],
        "docs": [
            "sphinx",
            "sphinx-autobuild",
            "sphinx-rtd-theme",
            # For spelling
            "sphinxcontrib.spelling",
            # Type hints support
            "sphinx-autodoc-typehints==1.21.1",  # TODO: remove version constraint, see #1290
            # Copy button for code snippets
            "sphinx_copybutton",
        ],
        "extra": extra_packages,
        "extra_no_roms": extra_no_roms,
    },
    description="PyTorch version of Stable Baselines, implementations of reinforcement learning algorithms.",
    author="Antonin Raffin",
    url="https://github.com/DLR-RM/stable-baselines3",
    author_email="[email protected]",
    keywords="reinforcement-learning-algorithms reinforcement-learning machine-learning "
    "gym openai stable baselines toolbox python data-science",
    license="MIT",
    long_description=long_description,
    long_description_content_type="text/markdown",
    version=__version__,
    python_requires=">=3.7",
    # PyPI package information.
    project_urls={
        "Code": "https://github.com/DLR-RM/stable-baselines3",
        "Documentation": "https://stable-baselines3.readthedocs.io/",
        "SB3-Contrib": "https://github.com/Stable-Baselines-Team/stable-baselines3-contrib",
        "RL-Zoo": "https://github.com/DLR-RM/rl-baselines3-zoo",
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
    ],
)

# python setup.py sdist
# python setup.py bdist_wheel
# twine upload --repository-url https://test.pypi.org/legacy/ dist/*
# twine upload dist/*
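
# Equivalent one-step build with the PyPA "build" frontend (a sketch of the
# modern alternative, assuming `pip install build`; not the project's
# documented release process):
#   python -m build  # writes both the sdist and the wheel to dist/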