Rainbow

class srl.algorithms.rainbow.rainbow.Config(framework: ~typing.Literal['auto', 'tensorflow', 'torch'] = 'auto', observation_mode: ~typing.Literal['', 'render_image'] = '', override_env_observation_type: ~srl.base.define.SpaceTypes = SpaceTypes.UNKNOWN, override_observation_type: str | ~srl.base.define.RLBaseTypes = <RLBaseTypes.NONE: 1>, override_action_type: str | ~srl.base.define.RLBaseTypes = <RLBaseTypes.NONE: 1>, action_division_num: int = 10, observation_division_num: int = 1000, frameskip: int = 0, extend_worker: ~typing.Type[ExtendWorker] | None = None, processors: ~typing.List[RLProcessor] = <factory>, render_image_processors: ~typing.List[RLProcessor] = <factory>, enable_rl_processors: bool = True, enable_state_encode: bool = True, enable_action_decode: bool = True, window_length: int = 1, render_image_window_length: int = 1, render_last_step: bool = True, render_rl_image: bool = True, render_rl_image_size: ~typing.Tuple[int, int] = (128, 128), enable_sanitize: bool = True, enable_assertion: bool = False, dtype: str = 'float32', test_epsilon: float = 0, batch_size: int = 32, memory: ~srl.rl.memories.priority_replay_buffer.PriorityReplayBufferConfig = <factory>, actor_epsilon: float = 0.4, actor_alpha: float = 7.0, epsilon: float = 0.1, epsilon_scheduler: ~srl.rl.schedulers.scheduler.SchedulerConfig = <factory>, lr: float = 0.001, lr_scheduler: ~srl.rl.schedulers.lr_scheduler.LRSchedulerConfig = <factory>, input_value_block: ~srl.rl.models.config.input_value_block.InputValueBlockConfig = <factory>, input_image_block: ~srl.rl.models.config.input_image_block.InputImageBlockConfig = <factory>, discount: float = 0.99, target_model_update_interval: int = 1000, enable_reward_clip: bool = False, enable_double_dqn: bool = True, enable_noisy_dense: bool = False, enable_rescale: bool = False, multisteps: int = 3, retrace_h: float = 1.0)

<RLConfigComponentFramework>
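
A minimal usage sketch, assuming the srl.Runner API; argument names such as max_train_count may differ between versions:

```python
import srl
from srl.algorithms import rainbow

rl_config = rainbow.Config()

# "Grid" is one of the sample environments bundled with srl.
runner = srl.Runner("Grid", rl_config)
runner.train(max_train_count=10_000)
print(runner.evaluate())
```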

test_epsilon: float = 0

ε-greedy exploration parameter used during testing

batch_size: int = 32

Batch size

memory: PriorityReplayBufferConfig

<PriorityReplayBuffer>

actor_epsilon: float = 0.4

ε-greedy exploration parameter for distributed learning. Actor \(i\) of \(N\) uses \(\epsilon_i = \epsilon^{1 + \frac{i}{N-1} \alpha}\) (Ape-X style), where \(\epsilon\) is actor_epsilon and \(\alpha\) is actor_alpha

actor_alpha: float = 7.0

See actor_epsilon
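
An illustrative computation of the per-actor ε values from the formula above (plain NumPy; the helper name actor_epsilons is hypothetical, not part of srl):

```python
import numpy as np

def actor_epsilons(num_actors: int, epsilon: float = 0.4, alpha: float = 7.0) -> np.ndarray:
    """Per-actor exploration rates: eps_i = eps ** (1 + i / (N - 1) * alpha)."""
    if num_actors == 1:
        return np.array([epsilon])
    i = np.arange(num_actors)
    return epsilon ** (1.0 + i / (num_actors - 1) * alpha)

print(actor_epsilons(4))  # [0.4, ~0.047, ~0.0056, ~0.00066]
```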

epsilon: float = 0.1

ε-greedy exploration parameter used during training
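
For reference, ε-greedy action selection reduces to the following (illustrative sketch, not srl's internal code):

```python
import numpy as np

rng = np.random.default_rng()

def epsilon_greedy(q_values: np.ndarray, epsilon: float) -> int:
    """With probability epsilon pick a random action, otherwise the greedy one."""
    if rng.random() < epsilon:
        return int(rng.integers(len(q_values)))
    return int(np.argmax(q_values))
```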

epsilon_scheduler: SchedulerConfig

<Scheduler>

lr: float = 0.001

Learning rate

lr_scheduler: LRSchedulerConfig

<LRScheduler>

input_value_block: InputValueBlockConfig

<InputValueBlock>

input_image_block: InputImageBlockConfig

<InputImageBlock>

hidden_block: DuelingNetworkConfig

Hidden layer block (<DuelingNetwork>)
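
The dueling head combines a state value with mean-centered per-action advantages (illustrative sketch, not srl's internal code):

```python
import numpy as np

def dueling_q(value: float, advantages: np.ndarray) -> np.ndarray:
    """Dueling head: Q(s, a) = V(s) + A(s, a) - mean_a A(s, a)."""
    return value + advantages - advantages.mean()

print(dueling_q(1.0, np.array([0.5, -0.5, 0.0])))  # [1.5, 0.5, 1.0]
```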

discount: float = 0.99

Discount rate

target_model_update_interval: int = 1000

Interval at which the target network is synchronized with the online network

enable_reward_clip: bool = False

If True, clip rewards to one of the three values [-1, 0, 1]
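
That is, rewards are clipped by sign (illustrative):

```python
import numpy as np

def clip_reward(r: float) -> float:
    """Map any reward to -1, 0, or 1 by its sign."""
    return float(np.sign(r))

print(clip_reward(3.7), clip_reward(0.0), clip_reward(-0.2))  # 1.0 0.0 -1.0
```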

enable_double_dqn: bool = True

If True, enable Double DQN
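
In Double DQN the online network selects the next action and the target network evaluates it, which reduces overestimation bias (illustrative sketch, not srl's internal code):

```python
import numpy as np

def double_dqn_target(q_online_next: np.ndarray, q_target_next: np.ndarray,
                      reward: float, done: bool, discount: float = 0.99) -> float:
    """Target: r + gamma * Q_target(s', argmax_a Q_online(s', a))."""
    a = int(np.argmax(q_online_next))
    return reward + (0.0 if done else discount * q_target_next[a])
```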

enable_noisy_dense: bool = False

If True, use Noisy Dense layers (NoisyNet) for exploration
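
A sketch of the idea behind a noisy dense layer: learnable noise scales perturb the weights on every forward pass, so exploration comes from the network itself. The independent-noise variant is shown for brevity (the NoisyNet paper uses factored noise):

```python
import numpy as np

rng = np.random.default_rng(0)

def noisy_dense(x: np.ndarray, w_mu: np.ndarray, w_sigma: np.ndarray,
                b_mu: np.ndarray, b_sigma: np.ndarray) -> np.ndarray:
    """Weights/biases are mu + sigma * eps, with fresh Gaussian noise each call."""
    w = w_mu + w_sigma * rng.standard_normal(w_mu.shape)
    b = b_mu + b_sigma * rng.standard_normal(b_mu.shape)
    return x @ w + b
```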

enable_rescale: bool = False

If True, enable value rescaling
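
This presumably refers to the value rescaling function \(h(x) = \operatorname{sign}(x)(\sqrt{|x|+1} - 1) + \epsilon x\) (Pohlen et al., 2018), which compresses large value magnitudes in the Q targets; an illustrative implementation:

```python
import numpy as np

def rescale(x: np.ndarray, eps: float = 1e-3) -> np.ndarray:
    """h(x) = sign(x) * (sqrt(|x| + 1) - 1) + eps * x"""
    return np.sign(x) * (np.sqrt(np.abs(x) + 1.0) - 1.0) + eps * x

def rescale_inv(x: np.ndarray, eps: float = 1e-3) -> np.ndarray:
    """Inverse of h, for converting network outputs back to value scale."""
    n = np.sqrt(1.0 + 4.0 * eps * (np.abs(x) + 1.0 + eps)) - 1.0
    return np.sign(x) * ((n / (2.0 * eps)) ** 2 - 1.0)

print(rescale_inv(rescale(np.array([10.0]))))  # ~[10.0]
```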

multisteps: int = 3

Number of steps for multi-step learning
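
With multisteps = n, the target bootstraps after accumulating n discounted rewards (illustrative computation, not srl's internal code):

```python
def n_step_return(rewards: list, bootstrap_q: float, discount: float = 0.99) -> float:
    """G = r_0 + g*r_1 + ... + g^(n-1)*r_{n-1} + g^n * Q(s_n, a_n)."""
    g = bootstrap_q
    for r in reversed(rewards):
        g = r + discount * g
    return g

# 3-step example matching the default multisteps=3
print(n_step_return([1.0, 0.0, 1.0], bootstrap_q=2.0))  # ~3.92
```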

retrace_h: float = 1.0

Retrace parameter h
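
Retrace weights off-policy multi-step corrections with a clipped importance ratio; h scales the clip (illustrative sketch, not srl's internal code):

```python
def retrace_coef(pi_prob: float, mu_prob: float, h: float = 1.0) -> float:
    """Cutting coefficient: c = h * min(1, pi(a|s) / mu(a|s))."""
    return h * min(1.0, pi_prob / mu_prob)

print(retrace_coef(pi_prob=0.9, mu_prob=0.3))  # 1.0 (ratio clipped at 1)
print(retrace_coef(pi_prob=0.1, mu_prob=0.5))  # 0.2
```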