# neptune_random_search.yaml — from an archived (read-only since Jun 22, 2022) repository.
# Neptune experiment configuration: random-search hyperparameter ranges for the
# home-credit-default-risk solution. Range values are intentionally quoted
# strings ('[low, high, "dist"]') — they are parsed downstream by the training
# code, not by the YAML loader.
project: ORGANIZATION/home-credit
name: home-credit-default-risk
tags: [solution-2]

# Metric tracked by Neptune; random search maximizes validation ROC AUC.
metric:
  channel: 'ROC_AUC'
  goal: maximize

# Paths excluded from experiment snapshot upload.
exclude:
  - output
  - imgs
  - neptune.log
  - offline_job.log
  - .git
  - .github
  - .idea
  - .ipynb_checkpoints
  - Untitled.ipynb

parameters:
  # Data — replace placeholders with local dataset paths.
  train_filepath: YOUR/PATH/TO/application_train.csv
  test_filepath: YOUR/PATH/TO/application_test.csv
  bureau_balance_filepath: YOUR/PATH/TO/bureau_balance.csv
  bureau_filepath: YOUR/PATH/TO/bureau.csv
  credit_card_balance_filepath: YOUR/PATH/TO/credit_card_balance.csv
  installments_payments_filepath: YOUR/PATH/TO/installments_payments.csv
  POS_CASH_balance_filepath: YOUR/PATH/TO/POS_CASH_balance.csv
  previous_application_filepath: YOUR/PATH/TO/previous_application.csv
  sample_submission_filepath: YOUR/PATH/TO/sample_submission.csv
  experiment_directory: YOUR/PATH/WORKDIR
  # Kaggle
  kaggle_api: 0
  kaggle_message: 'solution-2'
  # Data preparation
  validation_size: 0.2
  shuffle: 1
  # Execution
  clean_experiment_directory_before_training: 1
  num_workers: 16
  verbose: 1
  # Preprocessing
  fillna_value: -1
  # Light GBM
  lgbm_random_search_runs: 50
  lgbm__device: cpu  # gpu cpu
  lgbm__boosting_type: gbdt
  lgbm__objective: binary
  lgbm__metric: auc
  lgbm__number_boosting_rounds: 10000
  lgbm__early_stopping_rounds: 100
  lgbm__learning_rate: '[0.0005, 0.1, "log-uniform"]'
  lgbm__num_leaves: '[20, 50]'
  lgbm__max_depth: '[7, 30]'
  lgbm__min_child_samples: '[20, 45]'
  lgbm__max_bin: '[180, 500]'  # at most 255 for device=gpu
  lgbm__subsample: '[0.8, 0.9, 0.99, 0.6, 0.7, "list"]'
  lgbm__subsample_freq: 0
  lgbm__colsample_bytree: 0.8
  lgbm__min_child_weight: 4
  lgbm__reg_lambda: '[0.0, 0.1, "uniform"]'
  lgbm__reg_alpha: '[0.0, 0.1, "uniform"]'
  lgbm__scale_pos_weight: 1
  # XGBoost
  xgb_random_search_runs: 50
  xgb__booster: gbtree
  xgb__tree_method: hist  # gpu_hist # auto hist
  xgb__objective: binary:logistic
  xgb__eval_metric: auc
  xgb__nrounds: 10000
  xgb__early_stopping_rounds: 100
  xgb__eta: '[0.001, 0.1, "log-uniform"]'
  xgb__max_leaves: '[21, 41, "choice"]'
  xgb__max_depth: 16
  xgb__max_bin: '[155, 355, "choice"]'
  xgb__subsample: '[0.5, 1., "uniform"]'
  xgb__colsample_bytree: '[0.5, 1., "uniform"]'
  xgb__colsample_bylevel: 1
  xgb__min_child_weight: 4
  xgb__lambda: '[0.00001, 1.0, "log-uniform"]'
  xgb__alpha: '[0.00001, 1.0, "log-uniform"]'
  xgb__scale_pos_weight: 1
  # Random forest
  rf_random_search_runs: 50
  rf__n_estimators: '[100, 500]'
  rf__criterion: '["gini", "entropy", "list"]'
  rf__max_features: '[0.01, 0.5, "uniform"]'
  rf__min_samples_split: '[2, 50]'
  rf__min_samples_leaf: '[1, 50]'
  rf__class_weight: '[None, "balanced_subsample", "balanced", "list"]'
  # Logistic regression
  lr_random_search_runs: 50
  lr__penalty: '["l2", "l1", "list"]'
  lr__tol: '[0.00001, 0.01, "log-uniform"]'
  lr__C: '[0.1, 100, "log-uniform"]'
  lr__fit_intercept: '[0, 1, "list"]'
  lr__class_weight: '[None, "balanced", "list"]'
  lr__solver: '["liblinear", "saga", "list"]'
  lr__max_iter: '[100, 1000, 10000, 50000, "list"]'
  # SVC
  svc_random_search_runs: 50
  svc__kernel: '["poly", "rbf", "sigmoid", "list"]'
  svc__C: '[0.1, 100, "log-uniform"]'
  svc__degree: '[2, 7]'
  svc__gamma: auto
  svc__coef0: '[0.0, 1.0, "uniform"]'
  svc__probability: true
  svc__tol: '[0.00001, 0.01, "log-uniform"]'
  svc__max_iter: '[-1, 100, 1000, 10000, 50000, "list"]'