SND (Self-supervised Network Distillation)

class srl.algorithms.snd.Config(observation_mode: Literal['', 'render_image'] = '', override_env_observation_type: srl.base.define.SpaceTypes = <SpaceTypes.UNKNOWN: 0>, override_observation_type: Union[str, srl.base.define.RLBaseTypes] = <RLBaseTypes.NONE: 1>, override_action_type: Union[str, srl.base.define.RLBaseTypes] = <RLBaseTypes.NONE: 1>, action_division_num: int = 10, observation_division_num: int = 1000, frameskip: int = 0, extend_worker: Optional[Type[ForwardRef('ExtendWorker')]] = None, processors: List[ForwardRef('RLProcessor')] = <factory>, render_image_processors: List[ForwardRef('RLProcessor')] = <factory>, enable_rl_processors: bool = True, enable_state_encode: bool = True, enable_action_decode: bool = True, window_length: int = 1, render_image_window_length: int = 1, render_last_step: bool = True, render_rl_image: bool = True, render_rl_image_size: Tuple[int, int] = (128, 128), enable_sanitize: bool = True, enable_assertion: bool = False, dtype: str = 'float32', test_epsilon: float = 0, epsilon: float = 0.001, epsilon_scheduler: srl.rl.schedulers.scheduler.SchedulerConfig = <factory>, lr: float = 0.001, lr_scheduler: srl.rl.schedulers.lr_scheduler.LRSchedulerConfig = <factory>, batch_size: int = 32, memory: srl.rl.memories.replay_buffer.ReplayBufferConfig = <factory>, discount: float = 0.99, target_model_update_interval: int = 1000, int_reward_scale: float = 0.5, input_value_block: srl.rl.models.config.input_value_block.InputValueBlockConfig = <factory>, input_image_block: srl.rl.models.config.input_image_block.InputImageBlockConfig = <factory>)
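
A minimal usage sketch, assuming the standard srl.Runner workflow; the "Grid" environment and the timestep/episode counts below are illustrative only, not part of this reference:

    import srl
    from srl.algorithms import snd

    # SND configuration with its default hyperparameters
    rl_config = snd.Config()

    # Train on a sample environment, then run a short evaluation
    runner = srl.Runner("Grid", rl_config)
    runner.train(timesteps=10_000)
    print(runner.evaluate(max_episodes=10))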
test_epsilon: float = 0

ε-greedy exploration rate used during testing (evaluation)

epsilon: float = 0.001

ε-greedy exploration rate used during training

epsilon_scheduler: SchedulerConfig

<Scheduler>

lr: float = 0.001

Learning rate

lr_scheduler: LRSchedulerConfig

<LRScheduler>

batch_size: int = 32

Batch size

memory: ReplayBufferConfig

<ReplayBuffer>

discount: float = 0.99

Discount factor

target_model_update_interval: int = 1000

Interval at which the target network is synchronized

int_reward_scale: float = 0.5

Scale factor for the intrinsic reward
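
A rough sketch of how this scale is assumed to enter the training reward (ext_reward and int_reward are illustrative names, not library identifiers):

    # Assumed combination: the distillation (intrinsic) reward is scaled
    # before being added to the environment (extrinsic) reward.
    total_reward = ext_reward + rl_config.int_reward_scale * int_reward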

input_value_block: InputValueBlockConfig

<InputValueBlock>

input_image_block: InputImageBlockConfig

<InputImageBlock>

hidden_block: MLPBlockConfig

<MLPBlock> hidden layer
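
A hedged sketch of overriding some of the fields above by plain attribute assignment; the values are illustrative, and memory.capacity is an assumed ReplayBufferConfig field name:

    from srl.algorithms import snd

    rl_config = snd.Config()

    # Exploration and optimization
    rl_config.epsilon = 0.01        # ε-greedy rate during training
    rl_config.test_epsilon = 0.0    # act greedily at evaluation time
    rl_config.lr = 0.0005
    rl_config.batch_size = 64

    # Return and target-network settings
    rl_config.discount = 0.995
    rl_config.target_model_update_interval = 2000

    # Weight of the intrinsic (distillation) reward
    rl_config.int_reward_scale = 0.5

    # Replay buffer size (assumed field name)
    rl_config.memory.capacity = 100_000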